diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4072b9a --- /dev/null +++ b/.gitignore @@ -0,0 +1,37 @@ +# Binaries for programs and plugins +bin/ +dist/ + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool +*.out +coverage.html +coverage.txt + +# IDE and Editor files +.vscode/ +.idea/ +*.swp +*.swo +*~ +.DS_Store +*.iml + +# Temporary files +*.tmp +*.log + +# Kubernetes generated files +*.kubeconfig +kubeconfig + +# OS specific +.DS_Store +Thumbs.db + +# Local development +.env +.env.local +*.local \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..aeb8e9f --- /dev/null +++ b/Makefile @@ -0,0 +1,23 @@ +## Tool Versions +CONTROLLER_TOOLS_VERSION ?= v0.18.0 + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object paths="./..." + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd:allowDangerousTypes=true webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. 
+$(CONTROLLER_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) diff --git a/README.md b/README.md index 4cd0f93..297d0f5 100644 --- a/README.md +++ b/README.md @@ -1 +1,115 @@ # OpenEverest Provider SDK +A Go SDK for building database providers for the Everest platform. This SDK simplifies the creation of Kubernetes controllers that manage database lifecycle through the `DataStore` custom resource. + +## ๐ŸŽฏ Purpose of this PoC + +This repository contains a **proof-of-concept** implementation of the Provider SDK. The primary goals are: + +1. **Evaluate SDK usability** - Ensure the SDK is easy to use for provider developers +2. **Validate design decisions** - Test the proposed architecture with a real implementation +3. **Gather team feedback** - Enable the team to review and help improve the SDK + +## ๐Ÿ“š Documentation Guide + +| Document | Audience | Description | +|----------|----------|-------------| +| [SDK Overview](docs/SDK_OVERVIEW.md) | All reviewers | Understand the problem and SDK architecture | +| [Provider CR Generation](docs/PROVIDER_CR_GENERATION.md) | Developers | How to generate the Provider CR manifest | +| [Examples Guide](examples/README.md) | Developers | Walk through the PSMDB reference implementation | +| [Metadata Helpers](docs/METADATA_HELPERS.md) | Developers | Working with provider metadata | + +## ๐Ÿš€ Quick Start + +### Prerequisites + +- Go 1.21+ +- Access to a Kubernetes cluster (or use `kind`) +- `kubectl` configured + +### Run the PSMDB Example + +```bash +# Clone the repository +git clone https://github.com/openeverest/provider-sdk.git +cd provider-sdk + +# Install SDK CRDs (in production: auto-installed with Everest) +kubectl apply -f config/crd/bases/ + +# Install PSMDB operator (in production: packaged in provider Helm chart) +kubectl 
apply --server-side -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/bundle.yaml + +# Run the provider +cd examples/psmdb +go run cmd/provider/main.go +``` + +### Create a Test DataStore + +```bash +kubectl apply -f examples/datastore-simple.yaml +``` + +## ๐Ÿ“ Repository Structure + +``` +provider-sdk/ +โ”œโ”€โ”€ README.md # This file +โ”œโ”€โ”€ docs/ +โ”‚ โ”œโ”€โ”€ SDK_OVERVIEW.md # SDK architecture and concepts +โ”‚ โ”œโ”€โ”€ METADATA_HELPERS.md # Working with metadata +โ”‚ โ””โ”€โ”€ PROVIDER_CR_GENERATION.md # How to generate Provider manifests +โ”œโ”€โ”€ pkg/ +โ”‚ โ”œโ”€โ”€ apis/v2alpha1/ # CRD types (DataStore, Provider) +โ”‚ โ”œโ”€โ”€ controller/ # SDK core (Context handle, Status, etc.) +โ”‚ โ”œโ”€โ”€ reconciler/ # Reconciler implementations +โ”‚ โ””โ”€โ”€ server/ # HTTP server for schemas +โ”œโ”€โ”€ examples/ +โ”‚ โ””โ”€โ”€ psmdb/ # PSMDB provider example +โ”‚ โ”œโ”€โ”€ cmd/ +โ”‚ โ”‚ โ””โ”€โ”€ provider/ # Provider entrypoint +โ”‚ โ”œโ”€โ”€ internal/ # PSMDB business logic +โ”‚ โ””โ”€โ”€ psmdbspec/ # PSMDB types and schemas +โ””โ”€โ”€ config/crd/bases/ # CRD manifests +``` + +## ๐Ÿ” How to Review This PoC + +### For Decision Makers + +1. **Read the [SDK Overview](docs/SDK_OVERVIEW.md)** to understand the problem and approach +2. **Review the decision documents** in `docs/decisions/` +3. **Look at the [examples](examples/)** to see both approaches in action + +### For Developers + +1. **Start with [examples/README.md](examples/README.md)** for a hands-on walkthrough +2. **Examine the SDK code** in `pkg/controller/` - especially: + - [common.go](pkg/controller/common.go) - The `Context` handle abstraction + - [interface.go](pkg/controller/interface.go) - Provider interface types +3. **Run the examples** and create test DataStore resources + +### Questions to Consider + +When reviewing, please consider: + +1. **Usability**: Is the SDK easy to understand and use? +2. 
**API Design**: Is the interface design intuitive and idiomatic? +3. **Missing Features**: What's missing that would be needed for production? +4. **Naming**: Are the names (Context, Status, etc.) clear and appropriate? + +## ๐Ÿ“ Providing Feedback + +Please provide feedback through: +- GitHub Issues for specific problems or suggestions +- PR comments for code-level feedback +- Team discussions for design decisions + +## ๐Ÿ”— Related Links + +- [Everest Platform](https://github.com/percona/everest) - Main Everest repository +- [PSMDB Operator](https://github.com/percona/percona-server-mongodb-operator) - Percona MongoDB operator + +--- + +**Status**: Proof of Concept | **Version**: 0.1.0 \ No newline at end of file diff --git a/cmd/provider-sdk/main.go b/cmd/provider-sdk/main.go new file mode 100644 index 0000000..390209a --- /dev/null +++ b/cmd/provider-sdk/main.go @@ -0,0 +1,125 @@ +package main + +// Provider SDK CLI Tool +// +// This tool provides utilities for provider developers, including: +// - generate-manifest: Generate a Provider CR YAML from Go code +// +// Usage: +// provider-sdk generate-manifest --name --namespace --output +// +// See docs/PROVIDER_CR_GENERATION.md for detailed documentation. 
+ +import ( + "flag" + "fmt" + "os" +) + +func main() { + if len(os.Args) < 2 { + printUsage() + os.Exit(1) + } + + switch os.Args[1] { + case "generate-manifest": + generateManifestCmd(os.Args[2:]) + case "help", "-h", "--help": + printUsage() + default: + fmt.Fprintf(os.Stderr, "Unknown command: %s\n", os.Args[1]) + printUsage() + os.Exit(1) + } +} + +func printUsage() { + fmt.Println(`Provider SDK CLI Tool + +Usage: + provider-sdk [options] + +Commands: + generate-manifest Generate a Provider CR YAML manifest from Go code + help Show this help message + +Use "provider-sdk -h" for more information about a command.`) +} + +func generateManifestCmd(args []string) { + fs := flag.NewFlagSet("generate-manifest", flag.ExitOnError) + name := fs.String("name", "", "Provider name (required)") + namespace := fs.String("namespace", "", "Namespace for the Provider CR (optional, omit for cluster-scoped)") + output := fs.String("output", "", "Output file path (default: stdout)") + + fs.Usage = func() { + fmt.Println(`Generate a Provider CR YAML manifest from Go code. + +This command is intended to be called from a Go generate directive in your +provider's main package. It reads the provider metadata from your Go code +and generates a YAML manifest that can be included in your Helm chart. + +Usage: + provider-sdk generate-manifest [options] + +Options:`) + fs.PrintDefaults() + fmt.Println(` +Example usage in your provider code: + + //go:generate provider-sdk generate-manifest --name percona-server-mongodb-operator --output ../../charts/provider/templates/provider.yaml + +The actual metadata is read from your provider implementation via a special +init mechanism. See the PSMDB example for details. 
+ +See docs/PROVIDER_CR_GENERATION.md for complete workflow documentation.`) + } + + if err := fs.Parse(args); err != nil { + os.Exit(1) + } + + if *name == "" { + fmt.Fprintln(os.Stderr, "Error: --name is required") + fs.Usage() + os.Exit(1) + } + + // Note: In a real implementation, this would load the provider metadata + // from a compiled Go binary or through a plugin mechanism. + // For now, we provide a library function that providers call directly. + fmt.Fprintf(os.Stderr, `Note: This CLI is a placeholder for the generate-manifest workflow. + +In practice, provider developers should use the library function directly: + + // In your provider's gen.go file: + package main + + import ( + "os" + sdk "github.com/openeverest/provider-sdk/pkg/controller" + ) + + func main() { + metadata := defineMetadata() // Your metadata definition + yaml, err := metadata.ToYAML("%s", "%s") + if err != nil { + panic(err) + } + + // Write to file or stdout + if err := os.WriteFile("provider.yaml", []byte(yaml), 0644); err != nil { + panic(err) + } + } + +See examples/psmdb_interface.go for a complete example. 
+`, *name, *namespace) + + // For demonstration, we'll generate a template + if *output != "" { + fmt.Fprintf(os.Stderr, "Would write to: %s\n", *output) + } +} + diff --git a/config/crd/bases/everest.percona.com_datastores.yaml b/config/crd/bases/everest.percona.com_datastores.yaml new file mode 100644 index 0000000..4bf2b0f --- /dev/null +++ b/config/crd/bases/everest.percona.com_datastores.yaml @@ -0,0 +1,236 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: datastores.everest.percona.com +spec: + group: everest.percona.com + names: + kind: DataStore + listKind: DataStoreList + plural: datastores + shortNames: + - dst + - dstore + singular: datastore + scope: Namespaced + versions: + - name: v2alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + components: + additionalProperties: + properties: + config: + description: Config specifies the component specific configuration. + properties: + configMapRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + key: + type: string + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + customSpec: + description: |- + CustomSpec provides an API for customising this component. + The API schema is defined by the provider's ComponentSchemas. + type: object + x-kubernetes-preserve-unknown-fields: true + image: + description: |- + Image specifies an override for the image to use. + When unspecified, it is autmatically set from the ComponentVersions + based on the Version specified. + type: string + name: + description: Name of the component. + type: string + replicas: + description: Replicas specifies the number of replicas for this + component. + format: int32 + type: integer + resources: + description: Resources requirements for this component. 
+ properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + storage: + description: |- + Storage requirements for this component. + For stateless components, this is an optional field. + properties: + size: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + type: string + type: object + type: + description: Type of the component from the Provider. + type: string + version: + description: Version of the component from ComponentVersions. + type: string + type: object + description: |- + Components defines the component instances for this cluster. + The keys are component names (e.g., "engine", "proxy", "backupAgent"). + Which components are valid depends on the selected topology. + type: object + global: + description: |- + Global contains provider-level configuration that applies to the entire cluster. + The schema for this field is defined by the provider's GlobalSchema. + type: object + x-kubernetes-preserve-unknown-fields: true + provider: + description: Provider is the name of the database provider (e.g., + "psmdb", "postgresql"). + type: string + topology: + description: Topology defines the deployment topology and its configuration. + properties: + config: + description: |- + Config contains topology-specific configuration. + The schema for this field is defined by the provider's TopologyDefinition. + Examples: shard count for sharded topology, replication factor, etc. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + type: + description: |- + Type is the topology name (e.g., "sharded", "replicaset"). + The available topologies are defined by the provider. + If omitted, the provider's default topology is used. + type: string + type: object + type: object + status: + properties: + components: + description: Components is the status of the components in the database + cluster. + items: + properties: + pods: + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + ready: + format: int32 + type: integer + state: + type: string + total: + format: int32 + type: integer + type: object + type: array + connectionURL: + description: ConnectionURL is the URL to connect to the database cluster. + type: string + credentialSecretRef: + description: |- + CredentialSecretRef is a reference to the secret containing the credentials. + This Secret contains the keys `username` and `password`. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + phase: + description: Phase of the database cluster. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/everest.percona.com_providers.yaml b/config/crd/bases/everest.percona.com_providers.yaml new file mode 100644 index 0000000..c922e72 --- /dev/null +++ b/config/crd/bases/everest.percona.com_providers.yaml @@ -0,0 +1,85 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: providers.everest.percona.com +spec: + group: everest.percona.com + names: + kind: Provider + listKind: ProviderList + plural: providers + shortNames: + - prv + - prov + singular: provider + scope: Namespaced + versions: + - name: v2alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + componentTypes: + additionalProperties: + properties: + versions: + items: + properties: + default: + type: boolean + image: + type: string + version: + type: string + type: object + type: array + type: object + type: object + components: + additionalProperties: + properties: + type: + type: string + type: object + type: object + topologies: + additionalProperties: + properties: + components: + additionalProperties: + properties: + optional: + type: boolean + type: object + type: object + type: object + type: object + type: object + status: + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/docs/METADATA_HELPERS.md b/docs/METADATA_HELPERS.md new file mode 100644 index 0000000..d1f1e53 --- /dev/null +++ b/docs/METADATA_HELPERS.md @@ -0,0 +1,118 @@ +# Provider Metadata Helpers + +This document describes helper functions for working with provider metadata to look up component types, versions, and images. + +## Overview + +When implementing a provider, you often need to look up default images or versions for components. The SDK provides convenient helper functions through the `Context` handle. + +**Key point:** When you register metadata with your provider (via `BaseProvider.Metadata` or `WithMetadata()`), it becomes available through `c.Metadata()` in your provider functions. 
+ +## Quick Reference + +```go +func SyncPSMDB(c *sdk.Context) error { + metadata := c.Metadata() + + // Get default image for a component type + image := metadata.GetDefaultImage("mongod") + // Returns: "percona/percona-server-mongodb:8.0.8-3" + + // Get full version info + version := metadata.GetDefaultVersion("mongod") + // Returns: &ComponentVersionMeta{Version: "8.0.8-3", Image: "...", Default: true} + + // Get component type for a logical component + componentType := metadata.GetComponentType("engine") + // Returns: "mongod" + + // Get default image for a logical component (combines above) + engineImage := metadata.GetDefaultImageForComponent("engine") + // Returns: "percona/percona-server-mongodb:8.0.8-3" +} +``` + +## Common Pattern: User Override with Default Fallback + +The most common use case is allowing users to override images while providing sensible defaults: + +```go +func SyncPSMDB(c *sdk.Context) error { + engine := c.DB().Spec.Components["engine"] + + var image string + if engine.Image != "" { + // User explicitly specified an image + image = engine.Image + } else if metadata := c.Metadata(); metadata != nil { + // Use the default from metadata + image = metadata.GetDefaultImage(engine.Type) + } + + psmdb := &psmdbv1.PerconaServerMongoDB{ + ObjectMeta: c.ObjectMeta(c.Name()), + Spec: psmdbv1.PerconaServerMongoDBSpec{ + Image: image, + }, + } + + return c.Apply(psmdb) +} +``` + +## Registering Metadata + +When creating your provider, register metadata using the `BaseProvider` struct: + +```go +func NewPSMDBProvider() *PSMDBProvider { + return &PSMDBProvider{ + BaseProvider: sdk.BaseProvider{ + ProviderName: "psmdb", + Metadata: PSMDBMetadata(), // Register metadata here + }, + } +} +``` + +The reconciler automatically detects that your provider implements `MetadataProvider` and makes the metadata available through `c.Metadata()` in all your sync, validate, status, and cleanup functions. 
+ +## Metadata Structure + +```go +func PSMDBMetadata() *sdk.ProviderMetadata { + return &sdk.ProviderMetadata{ + // Component types define available versions and images + ComponentTypes: map[string]sdk.ComponentTypeMeta{ + "mongod": { + Versions: []sdk.ComponentVersionMeta{ + {Version: "6.0.19-16", Image: "percona/percona-server-mongodb:6.0.19-16"}, + {Version: "8.0.8-3", Image: "percona/percona-server-mongodb:8.0.8-3", Default: true}, + }, + }, + }, + + // Components map logical names to component types + Components: map[string]sdk.ComponentMeta{ + "engine": {Type: "mongod"}, + "configServer": {Type: "mongod"}, + }, + + // Topologies define valid deployment configurations + Topologies: map[string]sdk.TopologyMeta{ + "replicaset": { + Components: map[string]sdk.TopologyComponentMeta{ + "engine": {Optional: false}, + }, + }, + }, + } +} +``` + +## Related Documentation + +- [SDK Overview](SDK_OVERVIEW.md) - Architecture and concepts +- [Provider CR Generation](PROVIDER_CR_GENERATION.md) - How metadata is used for Provider CRs + + diff --git a/docs/PROVIDER_CR_GENERATION.md b/docs/PROVIDER_CR_GENERATION.md new file mode 100644 index 0000000..16f2064 --- /dev/null +++ b/docs/PROVIDER_CR_GENERATION.md @@ -0,0 +1,345 @@ +# Provider CR Generation Guide + +This document explains how to generate the Provider CR manifest that describes your provider's component types, versions, and topologies. + +## Overview + +Every provider needs a `Provider` CR (Custom Resource) that tells Everest: +- What component types are available (e.g., `mongod`, `postgres`) +- What versions are supported for each type +- What logical components use those types (e.g., `engine`, `proxy`) +- What topologies are supported (e.g., `replicaset`, `sharded`) + +The Provider CR is generated from metadata you define in Go code and included in your Helm chart. 
+ +## Generation Flow + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” go run โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” git commit โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Go Metadata โ”‚ โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–บ โ”‚ provider.yaml โ”‚ โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ–บ โ”‚ Helm Chart โ”‚ +โ”‚ (source) โ”‚ generate- โ”‚ (manifest) โ”‚ โ”‚ (deployed) โ”‚ +โ”‚ โ”‚ manifest โ”‚ โ”‚ โ”‚ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Step 1: Define Metadata in Go + +First, define your provider's metadata using the SDK types: + +```go +// metadata.go +package main + +import sdk "github.com/openeverest/provider-sdk/pkg/controller" + +func PSMDBMetadata() *sdk.ProviderMetadata { + return &sdk.ProviderMetadata{ + // Component types define versions and images + ComponentTypes: map[string]sdk.ComponentTypeMeta{ + "mongod": { + Versions: []sdk.ComponentVersionMeta{ + { + Version: "6.0.19-16", + Image: "percona/percona-server-mongodb:6.0.19-16", + }, + { + Version: "8.0.8-3", + Image: "percona/percona-server-mongodb:8.0.8-3", + Default: true, // Mark default version + }, + }, + }, + "backup": { + Versions: []sdk.ComponentVersionMeta{ + { + Version: "2.5.0", + Image: "percona/percona-backup-mongodb:2.5.0", + Default: true, + }, + }, + }, + }, + + // Components map logical names to types + Components: map[string]sdk.ComponentMeta{ + "engine": {Type: "mongod"}, + "configServer": {Type: "mongod"}, + "backupAgent": {Type: "backup"}, + }, + + // Topologies define valid deployment configurations + Topologies: map[string]sdk.TopologyMeta{ + "replicaset": { + Components: map[string]sdk.TopologyComponentMeta{ + "engine": {Optional: false}, // Required + "backupAgent": {Optional: true}, // Optional + }, + }, + "sharded": { + Components: 
map[string]sdk.TopologyComponentMeta{ + "engine": {Optional: false}, + "configServer": {Optional: false}, + "backupAgent": {Optional: true}, + }, + }, + }, + } +} +``` + +## Step 2: Create Generation Tool + +Create a CLI tool to generate the manifest: + +```go +// cmd/generate-manifest/main.go +package main + +import ( + "flag" + "log" + "os" + + sdk "github.com/openeverest/provider-sdk/pkg/controller" +) + +func main() { + output := flag.String("output", "charts/provider.yaml", "Output file path") + name := flag.String("name", "psmdb", "Provider name") + namespace := flag.String("namespace", "", "Namespace (empty for cluster-scoped)") + flag.Parse() + + // Get your provider metadata + metadata := PSMDBMetadata() + + // Generate the YAML + yaml, err := sdk.GenerateManifest(metadata, *name, *namespace, *output) + if err != nil { + log.Fatalf("Failed to generate manifest: %v", err) + } + + log.Printf("Generated Provider CR at %s", *output) +} +``` + +## Step 3: Add to Build Process + +Add the generation step to your Makefile: + +```makefile +# Makefile + +# Generate the Provider CR manifest +.PHONY: generate-provider +generate-provider: + @echo "Generating Provider CR manifest..." + go run ./cmd/generate-manifest --output charts/provider.yaml --name psmdb + +# Make sure it runs before building +.PHONY: build +build: generate-provider + docker build -t my-provider:latest . + +# Add to your CI/CD verification +.PHONY: verify +verify: generate-provider + @git diff --exit-code charts/provider.yaml || \ + (echo "Error: provider.yaml is out of sync. 
Run 'make generate-provider'" && exit 1) +``` + +## Step 4: Include in Helm Chart + +Add the generated manifest to your Helm chart: + +```yaml +# charts/templates/provider.yaml +{{ .Files.Get "provider.yaml" }} +``` + +Or if you want to make it configurable: + +```yaml +# charts/templates/provider.yaml +{{- if .Values.provider.install }} +{{ .Files.Get "provider.yaml" }} +{{- end }} +``` + +**Production Deployment:** Your provider Helm chart should also include the underlying database operator as a dependency or bundled installation. For example, a PSMDB provider chart should install the Percona Server MongoDB Operator along with the Provider CR. This ensures all required components are deployed together. + +## Step 5: Commit the Generated File + +The generated `provider.yaml` should be committed to Git: + +```bash +# Generate the file +make generate-provider + +# Review the changes +git diff charts/provider.yaml + +# Commit it +git add charts/provider.yaml +git commit -m "Update Provider CR with new versions" +``` + +## Generated Output Example + +The tool generates a complete Provider CR like this: + +```yaml +apiVersion: everest.percona.com/v2alpha1 +kind: Provider +metadata: + name: psmdb +spec: + componentTypes: + mongod: + versions: + - version: "6.0.19-16" + image: "percona/percona-server-mongodb:6.0.19-16" + default: false + - version: "8.0.8-3" + image: "percona/percona-server-mongodb:8.0.8-3" + default: true + backup: + versions: + - version: "2.5.0" + image: "percona/percona-backup-mongodb:2.5.0" + default: true + components: + engine: + type: mongod + configServer: + type: mongod + backupAgent: + type: backup + topologies: + replicaset: + components: + engine: + optional: false + backupAgent: + optional: true + sharded: + components: + engine: + optional: false + configServer: + optional: false + backupAgent: + optional: true +``` + +## Best Practices + +### 1. 
Keep Metadata in Sync + +Your provider code should use the same metadata: + +```go +func NewPSMDBProvider() *PSMDBProvider { + return &PSMDBProvider{ + BaseProvider: sdk.BaseProvider{ + ProviderName: "psmdb", + Metadata: PSMDBMetadata(), // Same metadata! + }, + } +} +``` + +This ensures consistency and allows helper functions like `c.Metadata()` to work. + +### 2. Verify in CI/CD + +Add a check to ensure the manifest is always up-to-date: + +```yaml +# .github/workflows/ci.yml +- name: Verify Provider CR is up-to-date + run: | + make generate-provider + git diff --exit-code charts/provider.yaml +``` + +### 3. Version Your Images + +Use specific image tags, not `latest`: + +```go +{ + Version: "8.0.8-3", + Image: "percona/percona-server-mongodb:8.0.8-3", // โœ“ Good +} + +// Not this: +{ + Version: "latest", + Image: "percona/percona-server-mongodb:latest", // โœ— Bad +} +``` + +### 4. Mark Default Versions Explicitly + +```go +Versions: []sdk.ComponentVersionMeta{ + {Version: "6.0.19-16", Image: "...", Default: false}, + {Version: "8.0.8-3", Image: "...", Default: true}, // Clear default +} +``` + +### 5. Document Breaking Changes + +When updating topologies or component types, document the changes: + +```go +// v2.0.0: Removed "monitoring" component from replicaset topology +// v2.0.0: Added "proxy" component to sharded topology +``` + +### 6. Bundle Database Operator in Production + +Your provider Helm chart should include the underlying database operator: + +```yaml +# Chart.yaml +dependencies: + - name: percona-server-mongodb-operator + version: "1.21.1" + repository: "https://percona.github.io/percona-helm-charts" +``` + +Or include the operator manifests directly in your chart templates. This ensures the operator is installed automatically with your provider, rather than requiring manual installation. 
+ +## Troubleshooting + +### Manifest Not Updating + +```bash +# Force regeneration +rm charts/provider.yaml +make generate-provider +``` + +### Invalid YAML + +The generator validates metadata before creating YAML. Check for: +- Duplicate default versions +- Invalid component type references +- Missing required fields + +### Helm Installation Fails + +Ensure the Provider CRD is installed first: + +```bash +kubectl apply -f config/crd/bases/everest.percona.com_providers.yaml +``` + +## Related Documentation + +- [SDK Overview](../SDK_OVERVIEW.md) - Architecture and concepts +- [Metadata Helpers](../METADATA_HELPERS.md) - Using metadata in your provider code +- [Examples](../../examples/README.md) - Complete PSMDB implementation +- [metadata.go](../../pkg/controller/metadata.go) - Metadata types reference + diff --git a/docs/SDK_OVERVIEW.md b/docs/SDK_OVERVIEW.md new file mode 100644 index 0000000..21b6e22 --- /dev/null +++ b/docs/SDK_OVERVIEW.md @@ -0,0 +1,294 @@ +# SDK Overview + +This document explains the problem the SDK solves, its architecture, and key concepts. + +## The Problem + +Everest aims to provide a unified, cloud-native database management experience across multiple database engines (PostgreSQL, MongoDB, MySQL, ClickHouse, etc.). Each database engine has its own Kubernetes operator with unique: + +- Custom Resource Definitions (CRDs) +- Reconciliation patterns +- Status reporting mechanisms +- Configuration schemas + +**The core challenge:** How do we enable database engine maintainers to integrate their operators with Everest without requiring deep Kubernetes controller expertise? 
+ +### Without an SDK + +Without a proper SDK, provider authors face several challenges: + +| Pain Point | Description | +|------------|-------------| +| **Kubernetes complexity** | Authors must understand `context.Context`, `client.Client`, `reconcile.Request`, owner references, finalizers, and more | +| **Boilerplate code** | Each provider reimplements the same patterns: create-or-update, status mapping, cleanup logic | +| **Inconsistent implementations** | Without guidance, providers handle errors, retries, and status differently | +| **Steep learning curve** | New contributors need weeks to understand controller-runtime before writing provider logic | +| **Testing difficulty** | Tight coupling to Kubernetes makes unit testing painful | + +### The Gap + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ What Provider Authors Know โ”‚ +โ”‚ โ€ข How their database operator works โ”‚ +โ”‚ โ€ข What CRs their operator needs โ”‚ +โ”‚ โ€ข How to map DataStore spec to operator-specific config โ”‚ +โ”‚ โ€ข What status fields indicate healthy/unhealthy state โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ–ผ + โ•”โ•โ•โ•โ•โ•โ•โ•โ•— + โ•‘ GAP โ•‘ โ† This is what the SDK bridges + โ•šโ•โ•โ•โ•โ•โ•โ•โ• + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ What Kubernetes Controllers Require โ”‚ +โ”‚ โ€ข context.Context propagation โ”‚ +โ”‚ โ€ข client.Client for API operations โ”‚ +โ”‚ โ€ข Reconcile loops with proper requeue logic โ”‚ +โ”‚ โ€ข Owner references for garbage collection โ”‚ 
+โ”‚ โ€ข Finalizers for cleanup โ”‚ +โ”‚ โ€ข Watch configuration with predicates โ”‚ +โ”‚ โ€ข Status subresource updates โ”‚ +โ”‚ โ€ข Error handling and retry backoff โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## The Solution: Provider SDK + +The SDK bridges this gap by providing: + +1. **A simplified `Context` handle** - One object that provides everything a provider needs +2. **Automatic Kubernetes plumbing** - Finalizers, owner references, requeue logic handled automatically +3. **Semantic status helpers** - `Creating()`, `Running()`, `Failed()` instead of raw status structs +4. **Error-based flow control** - Use Go's idiomatic error handling, not custom result types + +### Before (Raw controller-runtime) - ~100+ lines + +```go +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + var db v2alpha1.DataStore + if err := r.Client.Get(ctx, req.NamespacedName, &db); err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + if db.DeletionTimestamp != nil { + if controllerutil.ContainsFinalizer(&db, finalizerName) { + // Complex cleanup logic with multiple API calls... + // Handle finalizer removal... + // Check for dependent resources... + } + return reconcile.Result{}, nil + } + + if !controllerutil.ContainsFinalizer(&db, finalizerName) { + controllerutil.AddFinalizer(&db, finalizerName) + if err := r.Client.Update(ctx, &db); err != nil { + return reconcile.Result{}, err + } + } + + // Create the operator CR with proper owner references... + // Update if exists, create if not... + // Check status and requeue if not ready... + // Update DataStore status... + // ... 
many more lines +} +``` + +### After (With SDK) - ~50 lines + +```go +type PSMDBProvider struct { + sdk.BaseProvider +} + +func (p *PSMDBProvider) Validate(c *sdk.Context) error { + // Just validation logic, nothing else + return nil +} + +func (p *PSMDBProvider) Sync(c *sdk.Context) error { + psmdb := &psmdbv1.PerconaServerMongoDB{ + ObjectMeta: c.ObjectMeta(c.Name()), + Spec: buildSpec(c), + } + return c.Apply(psmdb) // Owner ref set automatically +} + +func (p *PSMDBProvider) Status(c *sdk.Context) (sdk.Status, error) { + psmdb := &psmdbv1.PerconaServerMongoDB{} + if err := c.Get(psmdb, c.Name()); err != nil { + return sdk.Creating("Initializing"), nil + } + if psmdb.Status.State != "ready" { + return sdk.Creating("Starting"), nil + } + return sdk.Running(), nil +} + +func (p *PSMDBProvider) Cleanup(c *sdk.Context) error { + exists, _ := c.Exists(&psmdbv1.PerconaServerMongoDB{}, c.Name()) + if exists { + return sdk.WaitFor("PSMDB deletion") + } + return nil +} + +// Create reconciler +provider := &PSMDBProvider{ + BaseProvider: sdk.BaseProvider{ + ProviderName: "psmdb", + SchemeFuncs: []func(*runtime.Scheme) error{psmdbv1.AddToScheme}, + Owned: []client.Object{&psmdbv1.PerconaServerMongoDB{}}, + }, +} +reconciler, _ := reconciler.New(provider) +``` + +## SDK Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Provider Code โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ Validate() โ†’ Sync() โ†’ Status() โ†’ Cleanup() โ”‚ โ”‚ +โ”‚ โ”‚ (Your business logic - no Kubernetes complexity) โ”‚ โ”‚ +โ”‚ 
โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ SDK Layer โ”‚ +โ”‚ โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ Context โ”‚ โ”‚ Status โ”‚ โ”‚ WaitFor โ”‚ โ”‚ ObjectMeta โ”‚ โ”‚ +โ”‚ โ”‚ Handle โ”‚ โ”‚ Helpers โ”‚ โ”‚ Helpers โ”‚ โ”‚ Helpers โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ Reconciler Layer โ”‚ +โ”‚ 
โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ +โ”‚ โ”‚ โ€ข Finalizer management โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Owner reference handling โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Requeue logic โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Status updates โ”‚ โ”‚ +โ”‚ โ”‚ โ€ข Watch configuration โ”‚ โ”‚ +โ”‚ โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ controller-runtime โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Key Concepts + +### The Context Handle + +The `Context` struct is the main interface between your provider code and the SDK. 
It wraps: +- The Kubernetes client +- The current DataStore being reconciled +- Context for API operations +- Provider metadata (if configured) + +```go +func MySync(c *sdk.Context) error { + // Identity + c.Name() // DataStore name + c.Namespace() // DataStore namespace + + // Spec access + c.Spec() // Full spec + c.DB() // Underlying DataStore + c.Metadata() // Provider metadata + + // Resource operations (owner ref set automatically) + c.Apply(obj) // Create or update + c.Get(obj, name)// Read + c.Delete(obj) // Delete + c.Exists(obj, name) // Check existence + c.List(list) // List resources + + // Helpers + c.ObjectMeta(name) // Create ObjectMeta with owner ref +} +``` + +### Status Helpers + +Instead of manually constructing status structs, use semantic helpers: + +```go +// Creating state +return sdk.Creating("Waiting for primary node") + +// Running state +return sdk.Running() +return sdk.RunningWithConnection("mongodb://...", "secret-name") + +// Failed state +return sdk.Failed(fmt.Errorf("replication failed")) +``` + +### Flow Control + +Use errors for flow control - it's idiomatic Go: + +```go +func MySync(c *sdk.Context) error { + // Success - continue to next step + return nil + + // Wait and requeue + return sdk.WaitFor("resource to be ready") + + // Error - will be logged and reconciliation retried + return fmt.Errorf("failed to create resource: %w", err) +} +``` + +## What the SDK Handles Automatically + +| Concern | How SDK Handles It | +|---------|-------------------| +| **Finalizers** | Added automatically, removed after cleanup completes | +| **Owner references** | Set automatically by `Apply()` | +| **Requeue logic** | `WaitFor()` errors trigger requeue with backoff | +| **Error handling** | Errors are logged and trigger requeue | +| **Status updates** | Called after sync, updates status subresource | +| **Deletion handling** | Cleanup steps run when deletion timestamp is set | +| **Watch setup** | Configured from `Owns()` | +| **Scheme 
registration** | Types registered from `WithTypes()` | + +## Provider Lifecycle + +When a DataStore is created, modified, or deleted, the reconciler follows this flow: + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ DataStore Event โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ + โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Is DeletionTimestamp set? โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ”‚ โ”‚ + Yes No + โ”‚ โ”‚ + โ–ผ โ–ผ + โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” + โ”‚ Cleanup() โ”‚ โ”‚ Add Finalizer โ”‚ + โ”‚ Remove โ”‚ โ”‚ Validate() โ”‚ + โ”‚ Finalizer โ”‚ โ”‚ Sync() โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ โ”‚ Status() โ”‚ + โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +## Next Steps + +- **[Provider CR Generation Guide](PROVIDER_CR_GENERATION.md)** - How to generate Provider manifests +- **[Examples Guide](../examples/README.md)** - See a working implementation + diff --git a/examples/psmdb/Makefile b/examples/psmdb/Makefile new file mode 100644 index 0000000..03c92b0 --- /dev/null +++ b/examples/psmdb/Makefile @@ -0,0 +1,34 @@ +## Tool Versions +OPENAPI_GEN_VERSION ?= v0.0.0-20250910181357-589584f1c912 + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/../../bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +OPENAPI_GEN ?= $(LOCALBIN)/openapi-gen + +.PHONY: run +run: ## Run the provider locally + go run cmd/provider/main.go + +.PHONY: generate-openapi +generate-openapi: 
openapi-gen ## Generate OpenAPI definitions for custom spec types + $(OPENAPI_GEN) \ + --output-dir ./types/generated \ + --output-pkg github.com/openeverest/provider-sdk/examples/psmdb/types/generated \ + --output-file zz_generated.openapi.go \ + --report-filename /dev/null \ + --go-header-file ../../hack/boilerplate.go.txt \ + github.com/openeverest/provider-sdk/examples/psmdb/types + +.PHONY: test-integration +test-integration: ## Run integration tests against K8S cluster + . ./test/vars.sh && kubectl kuttl test --config ./test/integration/kuttl.yaml + +.PHONY: openapi-gen +openapi-gen: $(OPENAPI_GEN) ## Download openapi-gen locally if necessary +$(OPENAPI_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/openapi-gen || \ + GOBIN=$(LOCALBIN) go install k8s.io/kube-openapi/cmd/openapi-gen@$(OPENAPI_GEN_VERSION) + diff --git a/examples/psmdb/README.md b/examples/psmdb/README.md new file mode 100644 index 0000000..9cc56c2 --- /dev/null +++ b/examples/psmdb/README.md @@ -0,0 +1,281 @@ +# PSMDB Provider Example + +This directory contains a working implementation of a Percona Server MongoDB (PSMDB) provider using the SDK. + +## ๐Ÿ“ File Structure + +``` +examples/psmdb/ +โ”œโ”€โ”€ cmd/ +โ”‚ โ”œโ”€โ”€ provider/ # Provider entrypoint +โ”‚ โ”‚ โ””โ”€โ”€ main.go +โ”‚ โ””โ”€โ”€ generate-manifest/ +โ”‚ โ””โ”€โ”€ main.go # CLI tool to generate Provider CR manifest +โ”œโ”€โ”€ internal/ # PSMDB business logic +โ”‚ โ””โ”€โ”€ provider.go # ValidatePSMDB, SyncPSMDB, etc. +โ”œโ”€โ”€ psmdbspec/ # Custom spec types for PSMDB components +โ”‚ โ””โ”€โ”€ types.go +โ”œโ”€โ”€ test/ # Integration tests +โ”œโ”€โ”€ datastore-simple.yaml # Simple test DataStore manifest +โ””โ”€โ”€ datastore-example.yaml# Full DataStore manifest with all options +``` + +## ๐Ÿš€ Quick Start + +### Prerequisites + +1. A Kubernetes cluster (or `kind create cluster`) + +2. 
Install the SDK CRDs: + ```bash + kubectl apply -f ../../config/crd/bases/ + ``` + + **Note:** In production, these CRDs are automatically installed when installing Everest. + +3. Install the PSMDB operator: + ```bash + kubectl apply --server-side -f https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v1.21.1/deploy/bundle.yaml + ``` + + **Note:** This is a PoC requirement. In production, the underlying database operator (PSMDB in this case) should be packaged within the provider's Helm chart to ensure it installs automatically with the provider. + +### Generate the Provider CR + +Before running the provider, generate the Provider CR manifest: + +```bash +# Generate the Provider CR from Go metadata +go run ./cmd/generate-manifest/main.go > provider.yaml + +# This creates provider.yaml +# Install it in your cluster +kubectl apply -f provider.yaml +``` + +**Important:** The Provider CR must be created before the provider starts. This tells Everest what component types and versions your provider supports. + +See [Provider CR Generation Guide](../docs/PROVIDER_CR_GENERATION.md) for detailed instructions. + +### Run the Provider + +```bash +# From the examples/psmdb directory +go run cmd/provider/main.go +``` + +### Create a Test DataStore + +```bash +kubectl apply -f datastore-simple.yaml +``` + +Watch the provider logs and check the PSMDB resource: + +```bash +kubectl get psmdb +kubectl get datastore +``` + +## ๐Ÿ“– Understanding the Code + +### Business Logic (`internal/provider.go`) + +All provider logic is in `internal/provider.go`: + +```go +// Validate the DataStore spec +func ValidatePSMDB(c *sdk.Context) error { ... } + +// Create/update PSMDB resources +func SyncPSMDB(c *sdk.Context) error { ... } + +// Compute the current status +func StatusPSMDB(c *sdk.Context) (sdk.Status, error) { ... } + +// Handle cleanup on deletion +func CleanupPSMDB(c *sdk.Context) error { ... 
} +``` + +### Provider Implementation + +The provider implements the SDK interface: + +```go +type PSMDBProvider struct { + sdk.BaseProvider // Provides default implementations +} + +func NewPSMDBProviderInterface() *PSMDBProvider { + return &PSMDBProvider{ + BaseProvider: sdk.BaseProvider{ + ProviderName: "psmdb", + SchemeFuncs: []func(*runtime.Scheme) error{psmdbv1.AddToScheme}, + Owned: []client.Object{&psmdbv1.PerconaServerMongoDB{}}, + Metadata: PSMDBMetadata(), + }, + } +} + +// Implement the interface methods +func (p *PSMDBProvider) Validate(c *sdk.Context) error { return ValidatePSMDB(c) } +func (p *PSMDBProvider) Sync(c *sdk.Context) error { return SyncPSMDB(c) } +func (p *PSMDBProvider) Status(c *sdk.Context) (sdk.Status, error) { return StatusPSMDB(c) } +func (p *PSMDBProvider) Cleanup(c *sdk.Context) error { return CleanupPSMDB(c) } +``` + +**Key points:** +- Embed `sdk.BaseProvider` for defaults +- Implement `Validate`, `Sync`, `Status`, `Cleanup` +- Use `reconciler.New()` to create the reconciler + +## ๐Ÿ”ง Key SDK Concepts Demonstrated + +### The Context Handle + +The `*sdk.Context` is your main interface to everything: + +```go +func SyncPSMDB(c *sdk.Context) error { + // Get cluster info + name := c.Name() + namespace := c.Namespace() + spec := c.Spec() + + // Access the underlying DataStore + db := c.DB() + + // Get provider metadata + metadata := c.Metadata() + + // Create resources (owner reference set automatically) + psmdb := &psmdbv1.PerconaServerMongoDB{ + ObjectMeta: c.ObjectMeta(c.Name()), // Helper for ObjectMeta + Spec: buildSpec(c), + } + return c.Apply(psmdb) // Create or update +} +``` + +### Status Helpers + +Instead of raw status structs: + +```go +func StatusPSMDB(c *sdk.Context) (sdk.Status, error) { + psmdb := &psmdbv1.PerconaServerMongoDB{} + if err := c.Get(psmdb, c.Name()); err != nil { + return sdk.Creating("Waiting for PSMDB"), nil + } + + if psmdb.Status.State != "ready" { + return sdk.Creating("PSMDB is starting"), nil + 
} + + return sdk.RunningWithConnection( + fmt.Sprintf("mongodb://%s:27017", c.Name()), + c.Name() + "-credentials", + ), nil +} +``` + +### Flow Control with WaitFor + +```go +func CleanupPSMDB(c *sdk.Context) error { + exists, _ := c.Exists(&psmdbv1.PerconaServerMongoDB{}, c.Name()) + if exists { + return sdk.WaitFor("PSMDB deletion") // Requeue and wait + } + return nil // Done, continue cleanup +} +``` + +### Provider Metadata + +Metadata describes what your provider supports. This is used to generate the Provider CR: + +```go +func PSMDBMetadata() *sdk.ProviderMetadata { + return &sdk.ProviderMetadata{ + ComponentTypes: map[string]sdk.ComponentTypeMeta{ + "mongod": { + Versions: []sdk.ComponentVersionMeta{ + {Version: "8.0.8-3", Image: "percona/percona-server-mongodb:8.0.8-3", Default: true}, + {Version: "6.0.19-16", Image: "percona/percona-server-mongodb:6.0.19-16"}, + }, + }, + }, + Components: map[string]sdk.ComponentMeta{ + "engine": {Type: "mongod"}, + }, + Topologies: map[string]sdk.TopologyMeta{ + "replicaset": { + Components: map[string]sdk.TopologyComponentMeta{ + "engine": {Optional: false}, + }, + }, + }, + } +} +``` + +**Generating the Provider CR:** + +```bash +# Run the generation tool (defaults to stdout; -output writes to a file) +go run ./cmd/generate-manifest/main.go -output charts/provider.yaml + +# Output is written to charts/provider.yaml +# This file should be: +# 1. Committed to Git +# 2. Included in your Helm chart +# 3. Applied to the cluster before starting the provider +``` + +See [Provider CR Generation Guide](../docs/PROVIDER_CR_GENERATION.md) for more details. + +## ๐Ÿงช Running Integration Tests + +The `test/integration/` directory contains kuttl tests that verify the provider's behavior. + +### Prerequisites for Tests + +1. SDK CRDs installed (see Quick Start above) +2. PSMDB operator installed (see Quick Start above) +3. 
Provider running in the background: + ```bash + # In one terminal, start the provider: + go run cmd/provider/main.go + ``` + +### Running the Tests + +```bash +# From the examples directory: +make test-integration + +# Or run directly: +cd examples +. ./test/vars.sh && kubectl kuttl test --config ./test/integration/kuttl.yaml +``` + +**Note:** The tests assume the provider is already running and will create/update/delete DataStore resources to verify correct behavior. + +## ๐Ÿ“ Creating Your Own Provider + +To create a new provider: + +1. **Copy the structure** from this example +2. **Replace PSMDB types** with your operator's types +3. **Define your metadata** with component types and versions +4. **Generate the Provider CR** using the CLI tool +5. **Implement the four functions**: Validate, Sync, Status, Cleanup + +See the [SDK Overview](../docs/SDK_OVERVIEW.md) and [Provider CR Generation Guide](../docs/PROVIDER_CR_GENERATION.md) for detailed guidance. + +## ๐Ÿ”— Related Documentation + +- [SDK Overview](../docs/SDK_OVERVIEW.md) - Architecture and concepts +- [Provider CR Generation](../docs/PROVIDER_CR_GENERATION.md) - How to generate the Provider CR +- [Metadata Helpers](../docs/METADATA_HELPERS.md) - Working with metadata diff --git a/examples/psmdb/cmd/generate-manifest/main.go b/examples/psmdb/cmd/generate-manifest/main.go new file mode 100644 index 0000000..534056c --- /dev/null +++ b/examples/psmdb/cmd/generate-manifest/main.go @@ -0,0 +1,114 @@ +package main + +// PSMDB Provider Manifest Generator +// +// This tool generates the Provider CR YAML manifest from the Go-defined metadata. +// Run this as part of your build process to keep the manifest in sync. +// +// Usage: +// go run ./examples/cmd/generate-manifest +// +// Or add to Makefile: +// generate-manifest: +// go run ./examples/cmd/generate-manifest +// +// See docs/PROVIDER_CR_GENERATION.md for complete workflow documentation. 
+ +import ( + "flag" + "fmt" + "os" + + sdk "github.com/openeverest/provider-sdk/pkg/controller" +) + +func main() { + output := flag.String("output", "", "Output file path (default: stdout)") + name := flag.String("name", "percona-server-mongodb-operator", "Provider name") + namespace := flag.String("namespace", "", "Namespace (empty for cluster-scoped)") + flag.Parse() + + // Define the metadata (same as in psmdb_interface.go) + metadata := psmdbMetadata() + + // Validate the metadata + if err := metadata.Validate(); err != nil { + fmt.Fprintf(os.Stderr, "Error: invalid metadata: %v\n", err) + os.Exit(1) + } + + if *output == "" { + // Write to stdout + if err := sdk.GenerateManifestToStdout(metadata, *name, *namespace); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + } else { + // Write to file + if err := sdk.GenerateManifest(metadata, *name, *namespace, *output); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + fmt.Fprintf(os.Stderr, "Generated: %s\n", *output) + } +} + +// psmdbMetadata returns the PSMDB provider metadata. +// This is the same metadata defined in psmdb_interface.go. +// In a real project, you might share this via a common package. 
+func psmdbMetadata() *sdk.ProviderMetadata { + return &sdk.ProviderMetadata{ + ComponentTypes: map[string]sdk.ComponentTypeMeta{ + "mongod": { + Versions: []sdk.ComponentVersionMeta{ + {Version: "6.0.19-16", Image: "percona/percona-server-mongodb:6.0.19-16-multi"}, + {Version: "6.0.21-18", Image: "percona/percona-server-mongodb:6.0.21-18"}, + {Version: "7.0.18-11", Image: "percona/percona-server-mongodb:7.0.18-11"}, + {Version: "8.0.4-1", Image: "percona/percona-server-mongodb:8.0.4-1-multi"}, + {Version: "8.0.8-3", Image: "percona/percona-server-mongodb:8.0.8-3", Default: true}, + }, + }, + "backup": { + Versions: []sdk.ComponentVersionMeta{ + {Version: "2.9.1", Image: "percona/percona-server-mongodb-backup:2.9.1", Default: true}, + }, + }, + "pmm": { + Versions: []sdk.ComponentVersionMeta{ + {Version: "2.44.1", Image: "percona/pmm-server:2.44.1", Default: true}, + }, + }, + }, + Components: map[string]sdk.ComponentMeta{ + "engine": {Type: "mongod"}, + "configServer": {Type: "mongod"}, + "proxy": {Type: "mongod"}, + "backupAgent": {Type: "backup"}, + "monitoring": {Type: "pmm"}, + }, + Topologies: map[string]sdk.TopologyMeta{ + "standard": { + Components: map[string]sdk.TopologyComponentMeta{ + "engine": { + Optional: false, + Defaults: map[string]interface{}{"replicas": 3}, + }, + "backupAgent": {Optional: true}, + "monitoring": {Optional: true}, + }, + }, + "sharded": { + Components: map[string]sdk.TopologyComponentMeta{ + "engine": { + Optional: false, + Defaults: map[string]interface{}{"replicas": 3}, + }, + "proxy": {Optional: false}, + "configServer": {Optional: false}, + "backupAgent": {Optional: true}, + "monitoring": {Optional: true}, + }, + }, + }, + } +} diff --git a/examples/psmdb/cmd/provider/main.go b/examples/psmdb/cmd/provider/main.go new file mode 100644 index 0000000..60404db --- /dev/null +++ b/examples/psmdb/cmd/provider/main.go @@ -0,0 +1,32 @@ +package main + +// PSMDB Provider +// +// This example shows how to implement a provider using the 
SDK interface. + +import ( + "fmt" + + provider "github.com/openeverest/provider-sdk/examples/psmdb/internal" + "github.com/openeverest/provider-sdk/pkg/reconciler" +) + +func main() { + provider := provider.NewPSMDBProviderInterface() + + r, err := reconciler.New(provider, + // Enable HTTP server for schema and validation endpoints + reconciler.WithServer(reconciler.ServerConfig{ + Port: 8082, + SchemaPath: "/schema", + ValidationPath: "/validate", + }), + ) + if err != nil { + panic(fmt.Errorf("failed to create reconciler: %w", err)) + } + + if err := r.StartWithSignalHandler(); err != nil { + panic(err) + } +} diff --git a/examples/psmdb/datastore-example.yaml b/examples/psmdb/datastore-example.yaml new file mode 100644 index 0000000..0cc3c67 --- /dev/null +++ b/examples/psmdb/datastore-example.yaml @@ -0,0 +1,120 @@ +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: psmdb-cluster + namespace: everest +spec: + # Provider identifier + provider: psmdb + + # Topology configuration + topology: + type: sharded + config: + # Topology-specific configuration (schema defined by provider) + shards: 3 + + # Global provider configuration + global: + allowUnsafeFlags: true + # Additional global settings... 
+ + # Component instances + components: + # Main database engine (required for sharded topology) + engine: + type: mongod + version: 8.0.8-3 + replicas: 3 + storage: + size: 10Gi + storageClass: standard + resources: + cpu: 2 + memory: 4Gi + + # Proxy/router (required for sharded topology) + proxy: + type: mongos + version: 8.0.8-3 + replicas: 3 + service: + exposeType: LoadBalancer + + # Config server (required for sharded topology) + configServer: + type: mongod + version: 8.0.8-3 + replicas: 3 + storage: + size: 5Gi + + # Backup agent (optional) + backupAgent: + type: backup + version: 2.9.1 + replicas: 1 + + # Monitoring (optional) + monitoring: + type: pmm + version: 2.44.1 + config: + # Reference to monitoring configuration + secretRef: + name: pmm-config + key: pmm.yaml + +# status: +# phase: Running +# connectionURL: mongodb://psmdb-cluster:27017 +# credentialSecretRef: +# name: psmdb-cluster-credentials + +# conditions: +# - type: Ready +# status: "True" +# reason: AllComponentsHealthy +# message: "All components are healthy and running" +# lastTransitionTime: "2025-09-03T12:00:00Z" + +# awaitingTasks: +# - type: restart-psmdb-cluster +# message: "Restart required for CRVersion update" +# lastTransitionTime: "2025-09-03T12:00:00Z" +# params: +# updateCRVersion: true + +# components: +# - name: engine +# state: Ready +# total: 9 # 3 replicas ร— 3 shards +# ready: 9 +# pods: +# - name: psmdb-cluster-rs0-0 +# - name: psmdb-cluster-rs0-1 +# - name: psmdb-cluster-rs0-2 +# - name: psmdb-cluster-rs1-0 +# - name: psmdb-cluster-rs1-1 +# - name: psmdb-cluster-rs1-2 +# - name: psmdb-cluster-rs2-0 +# - name: psmdb-cluster-rs2-1 +# - name: psmdb-cluster-rs2-2 + +# - name: proxy +# state: Ready +# total: 3 +# ready: 3 +# pods: +# - name: psmdb-cluster-mongos-0 +# - name: psmdb-cluster-mongos-1 +# - name: psmdb-cluster-mongos-2 + +# - name: configServer +# state: Ready +# total: 3 +# ready: 3 +# pods: +# - name: psmdb-cluster-cfg-0 +# - name: psmdb-cluster-cfg-1 +# - 
name: psmdb-cluster-cfg-2 diff --git a/examples/psmdb/datastore-simple.yaml b/examples/psmdb/datastore-simple.yaml new file mode 100644 index 0000000..a932682 --- /dev/null +++ b/examples/psmdb/datastore-simple.yaml @@ -0,0 +1,46 @@ +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: psmdb-simple + namespace: everest +spec: + # Provider identifier + provider: psmdb + + # Topology is optional - will use provider's default (typically "replicaset") + # If omitted, the provider determines the default topology + + # Components with smart defaults + components: + # Engine is the only required component for default topology + engine: + type: mongod + # Version is optional - will use default from provider metadata + # version: 8.0.8-3 + replicas: 3 + storage: + size: 10Gi + + # Optional: backup agent + backupAgent: + type: backup + # version: 2.9.1 + replicas: 1 + + # Optional: monitoring + monitoring: + type: pmm + # version: 2.44.1 + +# status: +# phase: Running +# connectionURL: mongodb://psmdb-simple:27017 +# credentialSecretRef: +# name: psmdb-simple-credentials + +# conditions: +# - type: Ready +# status: "True" +# reason: AllComponentsHealthy +# message: "All components are healthy and running" +# lastTransitionTime: "2025-09-03T12:00:00Z" diff --git a/examples/psmdb/internal/provider.go b/examples/psmdb/internal/provider.go new file mode 100644 index 0000000..87e5e7c --- /dev/null +++ b/examples/psmdb/internal/provider.go @@ -0,0 +1,499 @@ +package provider + +// PSMDB Provider Implementation +// +// This file contains the business logic for the PSMDB provider. 
//
// Key functions:
// - ValidatePSMDB: Validate DataStore spec
// - SyncPSMDB: Create/update PSMDB resources
// - StatusPSMDB: Compute cluster status
// - CleanupPSMDB: Handle deletion cleanup

import (
	"fmt"

	"github.com/AlekSi/pointer"
	"github.com/openeverest/provider-sdk/pkg/apis/v2alpha1"
	sdk "github.com/openeverest/provider-sdk/pkg/controller"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	types "github.com/openeverest/provider-sdk/examples/psmdb/types"
	psmdbv1 "github.com/percona/percona-server-mongodb-operator/pkg/apis/psmdb/v1"
)

// Component name and type constants for PSMDB.
// Component names are the keys used in DataStore spec.components;
// component types are the keys used in the provider metadata's componentTypes.
const (
	ComponentEngine       = "engine"
	ComponentConfigServer = "configServer"
	ComponentProxy        = "proxy"
	ComponentBackupAgent  = "backupAgent"
	ComponentMonitoring   = "monitoring"

	ComponentTypeMongod = "mongod"
	ComponentTypeBackup = "backup"
	ComponentTypePMM    = "pmm"
)

const (
	// psmdbDefaultConfigurationTemplate is the default mongod configuration
	// (YAML) applied to every replica set.
	// NOTE(review): leading whitespace inside this raw string was reconstructed;
	// confirm the exact indentation against the original file.
	psmdbDefaultConfigurationTemplate = `
operationProfiling:
  mode: slowOp
`
	// defaultBackupStartingTimeout is the PBM backup "starting" timeout in seconds.
	defaultBackupStartingTimeout = 120
)

// maxUnavailable is shared by every PodDisruptionBudget we create (at most one
// pod of a replset may be unavailable at a time).
var maxUnavailable = intstr.FromInt(1)

// defaultSpec returns the baseline PerconaServerMongoDBSpec that SyncPSMDB
// starts from: smart updates, upgrades disabled, a single 3-member "rs0"
// replset with a 10Gi PVC, and sharding disabled. Component-specific settings
// from the DataStore spec are layered on top of this by the configure* helpers.
func defaultSpec() psmdbv1.PerconaServerMongoDBSpec {
	return psmdbv1.PerconaServerMongoDBSpec{
		UpdateStrategy: psmdbv1.SmartUpdateStatefulSetStrategyType,
		UpgradeOptions: psmdbv1.UpgradeOptions{
			Apply:    "disabled",
			Schedule: "0 4 * * *",
			SetFCV:   true,
		},
		PMM: psmdbv1.PMMSpec{},
		Replsets: []*psmdbv1.ReplsetSpec{
			{
				Name:          "rs0",
				Configuration: psmdbv1.MongoConfiguration(psmdbDefaultConfigurationTemplate),
				MultiAZ: psmdbv1.MultiAZ{
					PodDisruptionBudget: &psmdbv1.PodDisruptionBudgetSpec{
						MaxUnavailable: &maxUnavailable,
					},
					Resources: corev1.ResourceRequirements{
						Limits: corev1.ResourceList{},
					},
				},
				Size: 3,
				VolumeSpec: &psmdbv1.VolumeSpec{
					PersistentVolumeClaim: psmdbv1.PVCSpec{
						PersistentVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
							Resources: corev1.VolumeResourceRequirements{
								Requests: corev1.ResourceList{
									corev1.ResourceStorage: resource.MustParse("10Gi"),
								},
							},
						},
					},
				},
			},
		},
		Sharding: psmdbv1.Sharding{
			Enabled: false,
		},
		VolumeExpansionEnabled: true,
		// FIXME: CRVersion is pinned; presumably it should be derived from the
		// installed operator version — TODO confirm.
		CRVersion: "1.21.1",
	}
}

// ValidatePSMDB validates the DataStore spec for PSMDB.
func ValidatePSMDB(c *sdk.Context) error {
	fmt.Println("Validating PSMDB cluster:", c.Name())
	// TODO: Add actual validation logic
	// Example: Check for required components, validate storage sizes, etc.
	return nil
}

// configureReplset builds a ReplsetSpec with the given name, applying the
// caller-supplied replicas/resources/storage on top of the defaults
// (size 3, 10Gi PVC, empty resource limits). Nil or zero-valued inputs
// leave the corresponding default untouched. expose toggles the replset's
// ClusterIP service.
func configureReplset(name string, replicas *int32, resources *v2alpha1.Resources, storageSize *v2alpha1.Storage, expose bool) *psmdbv1.ReplsetSpec {
	rsSpec := &psmdbv1.ReplsetSpec{
		Name:          name,
		Configuration: psmdbv1.MongoConfiguration(psmdbDefaultConfigurationTemplate),
		MultiAZ: psmdbv1.MultiAZ{
			PodDisruptionBudget: &psmdbv1.PodDisruptionBudgetSpec{
				MaxUnavailable: &maxUnavailable,
			},
			Resources: corev1.ResourceRequirements{
				Limits: corev1.ResourceList{},
			},
		},
		Size: 3,
		VolumeSpec: &psmdbv1.VolumeSpec{
			PersistentVolumeClaim: psmdbv1.PVCSpec{
				PersistentVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
					Resources: corev1.VolumeResourceRequirements{
						Requests: corev1.ResourceList{
							corev1.ResourceStorage: resource.MustParse("10Gi"),
						},
					},
				},
			},
		},
		Expose: psmdbv1.ExposeTogglable{
			Enabled: expose,
			// TODO: implement exposing replset
			Expose: psmdbv1.Expose{
				ExposeType:         corev1.ServiceTypeClusterIP,
				ServiceAnnotations: map[string]string{},
			},
		},
	}

	// Overlay user-provided values; only non-nil, non-zero inputs override defaults.
	if replicas != nil {
		rsSpec.Size = *replicas
	}
	if resources != nil && !resources.CPU.IsZero() {
		rsSpec.MultiAZ.Resources.Limits[corev1.ResourceCPU] = resources.CPU
	}
	if resources != nil && !resources.Memory.IsZero() {
		rsSpec.MultiAZ.Resources.Limits[corev1.ResourceMemory] = resources.Memory
	}
	if storageSize != nil && !storageSize.Size.IsZero() {
		rsSpec.VolumeSpec.PersistentVolumeClaim.PersistentVolumeClaimSpec.Resources.Requests[corev1.ResourceStorage] = storageSize.Size
	}

	return rsSpec
}

// rsName returns the conventional replica-set name for shard index i ("rs0", "rs1", ...).
func rsName(i int) string {
	return fmt.Sprintf("rs%v", i)
}

// configureReplsets derives the list of data-bearing replsets from the
// DataStore spec: a single exposed "rs0" for non-sharded topologies, or one
// unexposed replset per shard (default 2, overridable via the sharded
// topology config) for the "sharded" topology.
func configureReplsets(c *sdk.Context) []*psmdbv1.ReplsetSpec {
	var replsets []*psmdbv1.ReplsetSpec

	ds := c.DB()
	spec := ds.Spec
	engine := spec.Components[ComponentEngine]

	// TODO: implement disabling
	if spec.Topology == nil || spec.Topology.Type != "sharded" {
		return []*psmdbv1.ReplsetSpec{
			configureReplset(rsName(0), engine.Replicas, engine.Resources, engine.Storage, true),
		}
	}

	numShards := 2 // default
	var shardedConfig types.ShardedTopologyConfig
	if c.TryDecodeTopologyConfig(&shardedConfig) && shardedConfig.NumShards > 0 {
		numShards = int(shardedConfig.NumShards)
	}

	// Create replsets for each shard
	for i := 0; i < numShards; i++ {
		replsets = append(replsets, configureReplset(rsName(i), engine.Replicas, engine.Resources, engine.Storage, false))
	}

	return replsets
}

// configureConfigServerReplset builds the config-server replset for sharded
// topologies. Returns nil for non-sharded topologies.
func configureConfigServerReplset(c *sdk.Context) *psmdbv1.ReplsetSpec {
	var replset *psmdbv1.ReplsetSpec

	ds := c.DB()
	spec := ds.Spec
	cfgSrv := spec.Components[ComponentConfigServer]

	// TODO: implement disabling
	if spec.Topology == nil || spec.Topology.Type != "sharded" {
		return replset
	}

	// TODO: check if this is okay. It adds the configuration, expose.type,
	// name, podDisruptionBudget that we didn't have in the everest operator
	return configureReplset("configsvr", cfgSrv.Replicas, cfgSrv.Resources, cfgSrv.Storage, false)
}

// configureMongos builds the mongos (proxy) spec from the DataStore's proxy
// component, defaulting to 3 instances and applying CPU/memory limits only
// when they are set and non-zero.
func configureMongos(c *sdk.Context) *psmdbv1.MongosSpec {
	ds := c.DB()
	spec := ds.Spec
	proxy := spec.Components[ComponentProxy]

	mongosSpec := &psmdbv1.MongosSpec{
		Size: 3,
		MultiAZ: psmdbv1.MultiAZ{
			Resources: corev1.ResourceRequirements{
				Limits: corev1.ResourceList{},
			},
		},
	}

	if proxy.Replicas != nil {
		mongosSpec.Size = *proxy.Replicas
	}
	if proxy.Resources != nil && !proxy.Resources.CPU.IsZero() {
		mongosSpec.MultiAZ.Resources.Limits[corev1.ResourceCPU] = proxy.Resources.CPU
	}
	if proxy.Resources != nil && !proxy.Resources.Memory.IsZero() {
		mongosSpec.MultiAZ.Resources.Limits[corev1.ResourceMemory] = proxy.Resources.Memory
	}

	// TODO: implement exposing mongos
	mongosSpec.Expose = psmdbv1.MongosExpose{
		Expose: psmdbv1.Expose{
			ExposeType:         corev1.ServiceTypeClusterIP,
			ServiceAnnotations: map[string]string{},
		},
	}

	return mongosSpec
}

// configureBackup builds the backup spec: backups enabled, PITR disabled,
// fixed resource limits, and the backup image resolved from the registered
// provider metadata (falling back to PSMDBMetadata() when no metadata is
// registered on the context).
func configureBackup(c *sdk.Context) psmdbv1.BackupSpec {
	// TODO: Implement proper backup configuration
	var backupImage string
	if metadata := c.Metadata(); metadata != nil {
		backupImage = metadata.GetDefaultImage("backup")
	} else {
		backupImage = PSMDBMetadata().GetDefaultImage("backup")
	}

	return psmdbv1.BackupSpec{
		Enabled: true,
		Image:   backupImage,
		PITR: psmdbv1.PITRSpec{
			Enabled: false,
		},
		Configuration: psmdbv1.BackupConfig{
			BackupOptions: &psmdbv1.BackupOptions{
				Timeouts: &psmdbv1.BackupTimeouts{Starting: pointer.ToUint32(defaultBackupStartingTimeout)},
			},
		},

		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("1G"),
				corev1.ResourceCPU:    resource.MustParse("300m"),
			},
		},
	}
}

// SyncPSMDB ensures all PSMDB resources exist and are configured
correctly. +func SyncPSMDB(c *sdk.Context) error { + fmt.Println("Syncing PSMDB cluster:", c.Name()) + psmdb := &psmdbv1.PerconaServerMongoDB{ + ObjectMeta: c.ObjectMeta(c.Name()), + Spec: defaultSpec(), + } + + // Get the engine component spec + engine := c.DB().Spec.Components[ComponentEngine] + // No need to check if engine is nil, it is guaranteed to be present by the validator + + // Set the image: use the user-specified image if provided, otherwise use the default from metadata + if engine.Image != "" { + // User explicitly specified an image + psmdb.Spec.Image = engine.Image + } else if metadata := c.Metadata(); metadata != nil { + // Look up the default image for the component type from the registered metadata + psmdb.Spec.Image = metadata.GetDefaultImage("mongod") + } else { + // Fallback: metadata not available, use PSMDBMetadata() directly + // This can happen in tests or when using NewContext instead of NewContextWithMetadata + psmdb.Spec.Image = PSMDBMetadata().GetDefaultImage(engine.Type) + } + psmdb.Spec.ImagePullPolicy = corev1.PullIfNotPresent + + psmdb.Spec.Replsets = configureReplsets(c) + if c.DB().Spec.Topology != nil && c.DB().Spec.Topology.Type == "sharded" { + psmdb.Spec.Sharding.Enabled = true + psmdb.Spec.Sharding.ConfigsvrReplSet = configureConfigServerReplset(c) + psmdb.Spec.Sharding.Mongos = configureMongos(c) + } + + psmdb.Spec.Backup = configureBackup(c) + + psmdb.Spec.Secrets = &psmdbv1.SecretsSpec{ + Users: "everest-secrets-" + c.Name(), + EncryptionKey: c.Name() + "-mongodb-encryption-key", + SSLInternal: c.Name() + "-ssl-internal", + } + + if err := c.Apply(psmdb); err != nil { + return err + } + fmt.Println("PSMDB cluster synced:", c.Name()) + return nil +} + +// StatusPSMDB computes the current status of the PSMDB cluster. 
func StatusPSMDB(c *sdk.Context) (sdk.Status, error) {
	// TODO: We probably shouldn't be querying the PSMDB object directly here;
	// It can lead to a race condition where we are setting the status based on
	// new data whereas the sync used older data.
	// Should the SDK be responsible for fetching and caching the PSMDB object
	// to ensure we only get it once during the reconcile?
	psmdb := &psmdbv1.PerconaServerMongoDB{}
	if err := c.Get(psmdb, c.Name()); err != nil {
		// Any Get error (including not-found) is reported as still creating;
		// the error itself is deliberately swallowed.
		return sdk.Creating("Waiting for PerconaServerMongoDB"), nil
	}
	// Map the upstream operator's app state onto the SDK status vocabulary.
	// Anything that is neither ready nor in error is treated as "creating".
	switch psmdb.Status.State {
	case psmdbv1.AppStateReady:
		return sdk.Running(), nil
	case psmdbv1.AppStateError:
		return sdk.Failed(psmdb.Status.Message), nil
	default:
		return sdk.Creating("Cluster is being created"), nil
	}
}

// CleanupPSMDB handles deletion of the PSMDB cluster by deleting the
// upstream PerconaServerMongoDB object with the same name.
func CleanupPSMDB(c *sdk.Context) error {
	fmt.Println("Cleaning up PSMDB cluster:", c.Name())
	// TODO: Implement handling of finalizers
	psmdb := &psmdbv1.PerconaServerMongoDB{
		ObjectMeta: c.ObjectMeta(c.Name()),
	}
	if err := c.Delete(psmdb); err != nil {
		return err
	}
	fmt.Println("PSMDB cluster cleaned up:", c.Name())
	return nil
}

// =============================================================================
// PROVIDER METADATA
// =============================================================================

// PSMDBTopologyDefinitions returns the topology definitions for PSMDB.
// This is shared by all provider implementations to maintain a single source of truth.
func PSMDBTopologyDefinitions() map[string]sdk.TopologyDefinition {
	return map[string]sdk.TopologyDefinition{
		// Replica set: only the engine is required; backup/monitoring are opt-in.
		string(types.TopologyTypeReplicaSet): {
			Schema: &types.ReplicaSetTopologyConfig{},
			Components: map[string]sdk.TopologyComponentDefinition{
				ComponentEngine:      {Optional: false, Defaults: map[string]interface{}{"replicas": 3}},
				ComponentBackupAgent: {Optional: true},
				ComponentMonitoring:  {Optional: true},
			},
		},
		// Sharded: additionally requires a proxy (mongos) and config servers.
		string(types.TopologyTypeSharded): {
			Schema: &types.ShardedTopologyConfig{},
			Components: map[string]sdk.TopologyComponentDefinition{
				ComponentEngine:       {Optional: false, Defaults: map[string]interface{}{"replicas": 3}},
				ComponentProxy:        {Optional: false},
				ComponentConfigServer: {Optional: false},
				ComponentBackupAgent:  {Optional: true},
				ComponentMonitoring:   {Optional: true},
			},
		},
	}
}

// PSMDBMetadata returns the metadata for the PSMDB provider.
// This defines the component types, versions, components, and topologies
// that the provider supports.
//
// This metadata is shared by all PSMDB provider examples and is used for:
// - CLI generation: `go run ./cmd/generate-manifest` -> provider.yaml (for Helm)
// - Runtime metadata access via c.Metadata()
//
// Note: The topologies are derived from the shared PSMDBTopologyDefinitions()
// to maintain a single source of truth across all provider implementations.
func PSMDBMetadata() *sdk.ProviderMetadata {
	// Define component types and logical components
	metadata := &sdk.ProviderMetadata{
		// ComponentTypes defines the available component types with their versions.
		// Each component type represents a different image/binary that can be deployed.
		ComponentTypes: map[string]sdk.ComponentTypeMeta{
			// mongod is the main MongoDB server component
			ComponentTypeMongod: {
				Versions: []sdk.ComponentVersionMeta{
					{Version: "6.0.19-16", Image: "percona/percona-server-mongodb:6.0.19-16-multi"},
					{Version: "6.0.21-18", Image: "percona/percona-server-mongodb:6.0.21-18"},
					{Version: "7.0.18-11", Image: "percona/percona-server-mongodb:7.0.18-11"},
					{Version: "8.0.4-1", Image: "percona/percona-server-mongodb:8.0.4-1-multi"},
					{Version: "8.0.8-3", Image: "percona/percona-server-mongodb:8.0.8-3", Default: true},
				},
			},
			// backup is the backup agent component
			ComponentTypeBackup: {
				Versions: []sdk.ComponentVersionMeta{
					{Version: "2.9.1", Image: "percona/percona-server-mongodb-backup:2.9.1", Default: true},
				},
			},
			// pmm is the Percona Monitoring and Management component
			ComponentTypePMM: {
				Versions: []sdk.ComponentVersionMeta{
					{Version: "2.44.1", Image: "percona/pmm-server:2.44.1", Default: true},
				},
			},
		},

		// Components defines the logical components that use the component types.
		// Multiple components can reference the same component type (e.g., engine and configServer both use mongod).
		Components: map[string]sdk.ComponentMeta{
			ComponentEngine:       {Type: ComponentTypeMongod}, // Main database engine
			ComponentConfigServer: {Type: ComponentTypeMongod}, // Config server for sharded clusters
			ComponentProxy:        {Type: ComponentTypeMongod}, // Proxy/mongos for sharded clusters
			ComponentBackupAgent:  {Type: ComponentTypeBackup}, // Backup agent
			ComponentMonitoring:   {Type: ComponentTypePMM},    // Monitoring agent
		},
	}

	// Derive topologies from the shared topology definitions
	metadata.Topologies = sdk.TopologiesFromSchemaProvider(PSMDBTopologyDefinitions())

	return metadata
}

// PSMDBProvider implements the sdk.ProviderInterface interface.
type PSMDBProvider struct {
	sdk.BaseProvider
}

// NewPSMDBProviderInterface creates a new PSMDB provider.
func NewPSMDBProviderInterface() *PSMDBProvider {
	return &PSMDBProvider{
		BaseProvider: sdk.BaseProvider{
			ProviderName: "psmdb",
			// SchemeFuncs registers the PSMDB operator types with the manager's scheme.
			SchemeFuncs: []func(*runtime.Scheme) error{
				psmdbv1.SchemeBuilder.AddToScheme,
			},
			// Owned lists the upstream resource types this provider creates and watches.
			Owned: []client.Object{
				&psmdbv1.PerconaServerMongoDB{},
			},
			Metadata: PSMDBMetadata(),
		},
	}
}

// Interface implementation - delegates to shared functions in psmdb_impl.go

// Validate delegates to ValidatePSMDB.
func (p *PSMDBProvider) Validate(c *sdk.Context) error {
	return ValidatePSMDB(c)
}

// Sync delegates to SyncPSMDB.
func (p *PSMDBProvider) Sync(c *sdk.Context) error {
	return SyncPSMDB(c)
}

// Status delegates to StatusPSMDB.
func (p *PSMDBProvider) Status(c *sdk.Context) (sdk.Status, error) {
	return StatusPSMDB(c)
}

// Cleanup delegates to CleanupPSMDB.
func (p *PSMDBProvider) Cleanup(c *sdk.Context) error {
	return CleanupPSMDB(c)
}

// Compile-time interface checks
var _ sdk.ProviderInterface = (*PSMDBProvider)(nil)
var _ sdk.MetadataProvider = (*PSMDBProvider)(nil)
var _ sdk.SchemaProvider = (*PSMDBProvider)(nil)

// SchemaProvider implementation for OpenAPI schema generation

// ComponentSchemas maps each logical component to the Go type whose fields
// define its custom (provider-specific) spec schema.
func (p *PSMDBProvider) ComponentSchemas() map[string]interface{} {
	return map[string]interface{}{
		ComponentEngine:       &types.MongodCustomSpec{},
		ComponentConfigServer: &types.MongodCustomSpec{},
		ComponentProxy:        &types.MongosCustomSpec{},
		ComponentBackupAgent:  &types.BackupCustomSpec{},
		ComponentMonitoring:   &types.PMMCustomSpec{},
	}
}

// Topologies returns the shared topology definitions.
func (p *PSMDBProvider) Topologies() map[string]sdk.TopologyDefinition {
	return PSMDBTopologyDefinitions()
}

// GlobalSchema returns the type describing provider-wide configuration.
func (p *PSMDBProvider) GlobalSchema() interface{} {
	return &types.GlobalConfig{}
}
diff --git a/examples/psmdb/provider.yaml b/examples/psmdb/provider.yaml
new file mode 100644
index 0000000..66f60c8
--- /dev/null
+++ b/examples/psmdb/provider.yaml
@@ -0,0 +1,61 @@
+# Provider CR generated from Go code
+# Do not edit manually - regenerate using: provider-sdk generate-manifest
+---
+apiVersion: everest.percona.com/v2alpha1
+kind: Provider
+metadata:
+  name: percona-server-mongodb-operator
+spec:
componentTypes: + backup: + versions: + - default: true + image: percona/percona-server-mongodb-backup:2.9.1 + version: 2.9.1 + mongod: + versions: + - image: percona/percona-server-mongodb:6.0.19-16-multi + version: 6.0.19-16 + - image: percona/percona-server-mongodb:6.0.21-18 + version: 6.0.21-18 + - image: percona/percona-server-mongodb:7.0.18-11 + version: 7.0.18-11 + - image: percona/percona-server-mongodb:8.0.4-1-multi + version: 8.0.4-1 + - default: true + image: percona/percona-server-mongodb:8.0.8-3 + version: 8.0.8-3 + pmm: + versions: + - default: true + image: percona/pmm-server:2.44.1 + version: 2.44.1 + components: + backupAgent: + type: backup + configServer: + type: mongod + engine: + type: mongod + monitoring: + type: pmm + proxy: + type: mongod + topologies: + sharded: + components: + backupAgent: + optional: true + configServer: {} + engine: {} + monitoring: + optional: true + proxy: {} + standard: + components: + backupAgent: + optional: true + engine: {} + monitoring: + optional: true +status: {} diff --git a/examples/psmdb/test/integration/core/00-assert.yaml b/examples/psmdb/test/integration/core/00-assert.yaml new file mode 100644 index 0000000..a7f6c08 --- /dev/null +++ b/examples/psmdb/test/integration/core/00-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 30 +# collectors: +# - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml +# - command: kubectl get databaseengines/percona-server-mongodb-operator -n ${NAMESPACE} -o yaml +# - type: pod +# namespace: everest-system +# selector: control-plane=controller-manager +# tail: 100 +# commands: +# - command: kubectl wait --for=jsonpath='{.status.operatorVersion}'=${PSMDB_OPERATOR_VERSION} databaseengines/percona-server-mongodb-operator -n ${NAMESPACE} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: percona-server-mongodb-operator +spec: + replicas: 0 +# --- +# apiVersion: everest.percona.com/v1alpha1 +# kind: 
DatabaseEngine +# metadata: +# name: percona-server-mongodb-operator +# spec: +# type: psmdb +# status: +# status: installed + diff --git a/examples/psmdb/test/integration/core/00-install.yaml b/examples/psmdb/test/integration/core/00-install.yaml new file mode 100644 index 0000000..d412e3f --- /dev/null +++ b/examples/psmdb/test/integration/core/00-install.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + # # Need to patch KUTTL's namespace to add the label so that the Everest Operator can reconcile resources from it. + # - command: kubectl patch ns ${NAMESPACE} -p '{"metadata":{"labels":{"app.kubernetes.io/managed-by":"everest"}}}' --type merge + - script: "curl -fsSL https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v${PSMDB_OPERATOR_VERSION}/deploy/operator.yaml | sed 's/replicas: [0-9]/replicas: 0/g' | kubectl apply -n ${NAMESPACE} -f -" + diff --git a/examples/psmdb/test/integration/core/10-assert.yaml b/examples/psmdb/test/integration/core/10-assert.yaml new file mode 100644 index 0000000..6656b5e --- /dev/null +++ b/examples/psmdb/test/integration/core/10-assert.yaml @@ -0,0 +1,144 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 5 +# timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + +# - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml +# - type: pod +# namespace: everest-system +# selector: control-plane=controller-manager +# tail: 100 +# commands: +# - command: kubectl wait --for=jsonpath='{.status.crVersion}'=${PSMDB_OPERATOR_VERSION} dst/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait --for=jsonpath='{.spec.engine.version}'=${PSMDB_DB_ENGINE_VERSION} dst/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait --for=jsonpath='{.spec.image}'="percona/percona-server-mongodb:${PSMDB_DB_ENGINE_VERSION}" 
psmdb/test-psmdb-cluster -n ${NAMESPACE} +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: dst + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(dst.metadata.finalizers)" + # message: "dst doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in dst.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in dst.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in dst.metadata.finalizers" + # message: "foregroundDeletion' is absent in dst.metadata.finalizers" + + # - celExpr: "has(psmdb.metadata.finalizers)" + # message: "psmdb doesn't have finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pods-in-order' in psmdb.metadata.finalizers" + # message: "'percona.com/delete-psmdb-pods-in-order' is absent in psmdb.metadata.finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pvc' in psmdb.metadata.finalizers" + # message: "percona.com/delete-psmdb-pvc' is absent in psmdb.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 3 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # expose: + 
# type: internal +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + imagePullPolicy: IfNotPresent + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + multiCluster: + enabled: false + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: true + type: ClusterIP + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + enabled: false + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true + diff --git a/examples/psmdb/test/integration/core/10-create-cluster.yaml b/examples/psmdb/test/integration/core/10-create-cluster.yaml new file mode 100644 index 0000000..270b67d --- /dev/null +++ b/examples/psmdb/test/integration/core/10-create-cluster.yaml @@ -0,0 +1,32 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # crVersion: 1.21.1 + # replicas: 3 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # proxy: + # expose: + # type: internal + diff --git a/examples/psmdb/test/integration/core/11-assert.yaml b/examples/psmdb/test/integration/core/11-assert.yaml new file mode 100644 index 0000000..4024286 --- /dev/null +++ 
b/examples/psmdb/test/integration/core/11-assert.yaml @@ -0,0 +1,136 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 5 +#timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: dst + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(db.metadata.finalizers)" + # message: "db doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in db.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in db.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in db.metadata.finalizers" + # message: "foregroundDeletion' is absent in db.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 3 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # 
expose: + # type: internal +status: + # status: ready + phase: Running +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + imagePullPolicy: IfNotPresent + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + multiCluster: + enabled: false + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: true + type: ClusterIP + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + enabled: false + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true +status: + state: ready + diff --git a/examples/psmdb/test/integration/core/11-cluster-ready.yaml b/examples/psmdb/test/integration/core/11-cluster-ready.yaml new file mode 100644 index 0000000..87a2917 --- /dev/null +++ b/examples/psmdb/test/integration/core/11-cluster-ready.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + - command: kubectl patch psmdb/test-psmdb-cluster --subresource status -n $NAMESPACE -p '{"status":{"state":"ready", "size":1, "ready":1}}' --type=merge diff --git a/examples/psmdb/test/integration/core/20-assert.yaml b/examples/psmdb/test/integration/core/20-assert.yaml new file mode 100644 index 0000000..48e2407 --- /dev/null +++ b/examples/psmdb/test/integration/core/20-assert.yaml @@ -0,0 +1,145 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 5 +#timeout: 30 +collectors: + - command: kubectl get 
dbc/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: db + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(db.metadata.finalizers)" + # message: "db doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in db.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in db.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in db.metadata.finalizers" + # message: "foregroundDeletion' is absent in db.metadata.finalizers" + + # - celExpr: "has(psmdb.metadata.finalizers)" + # message: "psmdb doesn't have finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pods-in-order' in psmdb.metadata.finalizers" + # message: "'percona.com/delete-psmdb-pods-in-order' is absent in psmdb.metadata.finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pvc' in psmdb.metadata.finalizers" + # message: "percona.com/delete-psmdb-pvc' is absent in psmdb.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + 
replicas: 2 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 2 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # expose: + # type: internal +status: + # status: ready + phase: Running +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + imagePullPolicy: IfNotPresent + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + multiCluster: + enabled: false + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: true + type: ClusterIP + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 2 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + enabled: false + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true +status: + state: ready + diff --git a/examples/psmdb/test/integration/core/20-update-cluster.yaml b/examples/psmdb/test/integration/core/20-update-cluster.yaml new file mode 100644 index 0000000..298b0f1 --- /dev/null +++ b/examples/psmdb/test/integration/core/20-update-cluster.yaml @@ -0,0 +1,32 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 2 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 2 + # 
resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # expose: + # type: internal + diff --git a/examples/psmdb/test/integration/core/30-assert.yaml b/examples/psmdb/test/integration/core/30-assert.yaml new file mode 100644 index 0000000..1033ea6 --- /dev/null +++ b/examples/psmdb/test/integration/core/30-assert.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +commands: + - command: kubectl wait --for=delete dst/test-psmdb-cluster -n $NAMESPACE + - command: kubectl wait --for=delete psmdb/test-psmdb-cluster -n $NAMESPACE + diff --git a/examples/psmdb/test/integration/core/30-delete-cluster.yaml b/examples/psmdb/test/integration/core/30-delete-cluster.yaml new file mode 100644 index 0000000..a0f0ab1 --- /dev/null +++ b/examples/psmdb/test/integration/core/30-delete-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + - script: kubectl -n $NAMESPACE delete dst/test-psmdb-cluster psmdb/test-psmdb-cluster --wait=false && sleep 5 + # - command: kubectl patch dst/test-psmdb-cluster -n $NAMESPACE -p '{"metadata":{"finalizers":null}}' --type merge + # - command: kubectl patch psmdb/test-psmdb-cluster -n $NAMESPACE -p '{"metadata":{"finalizers":null}}' --type merge + diff --git a/examples/psmdb/test/integration/core/40-assert.yaml b/examples/psmdb/test/integration/core/40-assert.yaml new file mode 100644 index 0000000..d809a84 --- /dev/null +++ b/examples/psmdb/test/integration/core/40-assert.yaml @@ -0,0 +1,145 @@ +apiVersion: kuttl.dev/v1 +kind: 
TestAssert +timeout: 5 +#timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +# commands: + # - command: kubectl wait --for=jsonpath='{.status.recommendedCRVersion}'=${PSMDB_OPERATOR_VERSION} dst/test-psmdb-cluster -n ${NAMESPACE} + # - command: kubectl wait --for=jsonpath='{.spec.engine.version}'=${PSMDB_DB_ENGINE_VERSION} dst/test-psmdb-cluster -n ${NAMESPACE} + + # - command: kubectl wait --for=jsonpath='{.spec.image}'="percona/percona-server-mongodb:${PSMDB_DB_ENGINE_VERSION}" psmdb/test-psmdb-cluster -n ${NAMESPACE} +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: dst + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(dst.metadata.finalizers)" + # message: "dst doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in dst.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in dst.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in dst.metadata.finalizers" + # message: "foregroundDeletion' is absent in dst.metadata.finalizers" + + # - celExpr: "has(psmdb.metadata.finalizers)" + # message: "psmdb doesn't have finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pods-in-order' in psmdb.metadata.finalizers" + # message: "'percona.com/delete-psmdb-pods-in-order' is absent in psmdb.metadata.finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pvc' in psmdb.metadata.finalizers" + # message: "percona.com/delete-psmdb-pvc' is absent in psmdb.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: 
"psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 1 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 1 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # expose: + # type: internal +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + imagePullPolicy: IfNotPresent + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + multiCluster: + enabled: false + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: true + type: ClusterIP + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 1 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + enabled: false + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true + diff --git a/examples/psmdb/test/integration/core/40-create-single-node-cluster.yaml 
b/examples/psmdb/test/integration/core/40-create-single-node-cluster.yaml new file mode 100644 index 0000000..f358c54 --- /dev/null +++ b/examples/psmdb/test/integration/core/40-create-single-node-cluster.yaml @@ -0,0 +1,31 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 1 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 1 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # proxy: + # expose: + # type: internal + diff --git a/examples/psmdb/test/integration/core/41-assert.yaml b/examples/psmdb/test/integration/core/41-assert.yaml new file mode 100644 index 0000000..72534de --- /dev/null +++ b/examples/psmdb/test/integration/core/41-assert.yaml @@ -0,0 +1,136 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 5 +# timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: dst + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(dst.metadata.finalizers)" + # message: "dst doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in dst.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in dst.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in dst.metadata.finalizers" + # message: "foregroundDeletion' is absent in 
dst.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + components: + engine: + type: mongod + replicas: 1 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + # engine: + # replicas: 1 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # expose: + # type: internal +status: + # status: ready + phase: Running +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + imagePullPolicy: IfNotPresent + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + multiCluster: + enabled: false + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: true + type: ClusterIP + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 1 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + enabled: false + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true +status: + 
state: ready + diff --git a/examples/psmdb/test/integration/core/41-cluster-ready.yaml b/examples/psmdb/test/integration/core/41-cluster-ready.yaml new file mode 100644 index 0000000..87a2917 --- /dev/null +++ b/examples/psmdb/test/integration/core/41-cluster-ready.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + - command: kubectl patch psmdb/test-psmdb-cluster --subresource status -n $NAMESPACE -p '{"status":{"state":"ready", "size":1, "ready":1}}' --type=merge diff --git a/examples/psmdb/test/integration/core/50-assert.yaml b/examples/psmdb/test/integration/core/50-assert.yaml new file mode 100644 index 0000000..1033ea6 --- /dev/null +++ b/examples/psmdb/test/integration/core/50-assert.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +commands: + - command: kubectl wait --for=delete dst/test-psmdb-cluster -n $NAMESPACE + - command: kubectl wait --for=delete psmdb/test-psmdb-cluster -n $NAMESPACE + diff --git a/examples/psmdb/test/integration/core/50-delete-cluster.yaml b/examples/psmdb/test/integration/core/50-delete-cluster.yaml new file mode 100644 index 0000000..a0f0ab1 --- /dev/null +++ b/examples/psmdb/test/integration/core/50-delete-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + - script: kubectl -n $NAMESPACE delete dst/test-psmdb-cluster psmdb/test-psmdb-cluster --wait=false && sleep 5 + # - command: kubectl patch dst/test-psmdb-cluster -n $NAMESPACE -p '{"metadata":{"finalizers":null}}' --type merge + # - command: kubectl patch psmdb/test-psmdb-cluster -n $NAMESPACE -p 
'{"metadata":{"finalizers":null}}' --type merge + diff --git a/examples/psmdb/test/integration/kuttl.yaml b/examples/psmdb/test/integration/kuttl.yaml new file mode 100644 index 0000000..a080d74 --- /dev/null +++ b/examples/psmdb/test/integration/kuttl.yaml @@ -0,0 +1,9 @@ +apiVersion: kuttl.dev/v1 +kind: TestSuite +artifactsDir: /tmp/ +startKIND: false +skipDelete: false +skipClusterDelete: true +testDirs: + - test/integration + diff --git a/examples/psmdb/test/integration/sharded/00-assert.yaml b/examples/psmdb/test/integration/sharded/00-assert.yaml new file mode 100644 index 0000000..a7f6c08 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/00-assert.yaml @@ -0,0 +1,29 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 30 +# collectors: +# - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml +# - command: kubectl get databaseengines/percona-server-mongodb-operator -n ${NAMESPACE} -o yaml +# - type: pod +# namespace: everest-system +# selector: control-plane=controller-manager +# tail: 100 +# commands: +# - command: kubectl wait --for=jsonpath='{.status.operatorVersion}'=${PSMDB_OPERATOR_VERSION} databaseengines/percona-server-mongodb-operator -n ${NAMESPACE} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: percona-server-mongodb-operator +spec: + replicas: 0 +# --- +# apiVersion: everest.percona.com/v1alpha1 +# kind: DatabaseEngine +# metadata: +# name: percona-server-mongodb-operator +# spec: +# type: psmdb +# status: +# status: installed + diff --git a/examples/psmdb/test/integration/sharded/00-install.yaml b/examples/psmdb/test/integration/sharded/00-install.yaml new file mode 100644 index 0000000..d412e3f --- /dev/null +++ b/examples/psmdb/test/integration/sharded/00-install.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + # # Need to patch KUTTL's namespace to add the label so that the Everest Operator can reconcile resources from it. 
+ # - command: kubectl patch ns ${NAMESPACE} -p '{"metadata":{"labels":{"app.kubernetes.io/managed-by":"everest"}}}' --type merge + - script: "curl -fsSL https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/v${PSMDB_OPERATOR_VERSION}/deploy/operator.yaml | sed 's/replicas: [0-9]/replicas: 0/g' | kubectl apply -n ${NAMESPACE} -f -" + diff --git a/examples/psmdb/test/integration/sharded/10-assert.yaml b/examples/psmdb/test/integration/sharded/10-assert.yaml new file mode 100644 index 0000000..6786177 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/10-assert.yaml @@ -0,0 +1,230 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 5 +# timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + +# - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml +# - type: pod +# namespace: everest-system +# selector: control-plane=controller-manager +# tail: 100 +# commands: +# # TODO check psmdb.spec.CRVersion +# # TODO check psmdb.spec.backup.image +# # TODO check dbc.status.recommendedCRVersion +# - command: kubectl wait --for=jsonpath='{.status.crVersion}'=${PSMDB_OPERATOR_VERSION} dbc/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait --for=jsonpath='{.spec.engine.version}'=${PSMDB_DB_ENGINE_VERSION} dbc/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait --for=jsonpath='{.spec.image}'="percona/percona-server-mongodb:${PSMDB_DB_ENGINE_VERSION}" psmdb/test-psmdb-cluster -n ${NAMESPACE} +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: dst + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(dst.metadata.finalizers)" + # message: "dst doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in 
dst.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in dst.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in dst.metadata.finalizers" + # message: "foregroundDeletion' is absent in dst.metadata.finalizers" + + # - celExpr: "has(psmdb.metadata.finalizers)" + # message: "psmdb doesn't have finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pods-in-order' in psmdb.metadata.finalizers" + # message: "'percona.com/delete-psmdb-pods-in-order' is absent in psmdb.metadata.finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pvc' in psmdb.metadata.finalizers" + # message: "percona.com/delete-psmdb-pvc' is absent in psmdb.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + topology: + type: sharded + config: + numShards: 2 + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + proxy: + type: mongod + replicas: 3 + configServer: + type: mongod + replicas: 3 + storage: + size: 25Gi + # engine: + # replicas: 3 + # resources: + # cpu: "1" + # memory: 4G + # storage: + # size: 25Gi + # type: psmdb + # userSecretsName: everest-secrets-test-psmdb-cluster + # proxy: + # expose: + # type: internal +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + 
limits: + cpu: 300m + memory: 1G + enableVolumeExpansion: true + imagePullPolicy: IfNotPresent + multiCluster: + enabled: false + pmm: + image: "" + resources: {} + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + type: ClusterIP + hidden: + enabled: false + resources: {} + size: 0 + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + type: ClusterIP + hidden: + enabled: false + resources: {} + size: 0 + name: rs1 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + configsvrReplSet: + arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: false + hidden: + enabled: false + resources: {} + size: 0 + nonvoting: + enabled: false + resources: {} + size: 0 + resources: {} + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + enabled: true + mongos: + expose: + type: ClusterIP + resources: {} + size: 3 + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true + diff --git a/examples/psmdb/test/integration/sharded/10-create-cluster.yaml b/examples/psmdb/test/integration/sharded/10-create-cluster.yaml new file mode 100644 index 0000000..cbb4fd8 --- 
/dev/null +++ b/examples/psmdb/test/integration/sharded/10-create-cluster.yaml @@ -0,0 +1,50 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + topology: + type: sharded + config: + numShards: 2 + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + proxy: + type: mongod + replicas: 3 + configServer: + type: mongod + replicas: 3 + storage: + size: 25Gi +# spec: +# engine: +# crVersion: 1.21.1 +# replicas: 3 +# resources: +# cpu: "1" +# memory: 4G +# storage: +# size: 25Gi +# type: psmdb +# proxy: +# expose: +# type: internal +# sharding: +# configServer: +# replicas: 3 +# enabled: true +# shards: 2 + diff --git a/examples/psmdb/test/integration/sharded/11-assert.yaml b/examples/psmdb/test/integration/sharded/11-assert.yaml new file mode 100644 index 0000000..f3aefc6 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/11-assert.yaml @@ -0,0 +1,235 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 5 +# timeout: 30 +collectors: + - command: kubectl get dbc/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + +# - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml +# - type: pod +# namespace: everest-system +# selector: control-plane=controller-manager +# tail: 100 +# commands: +# # TODO check psmdb.spec.CRVersion +# # TODO check psmdb.spec.backup.image +# # TODO check dbc.status.recommendedCRVersion +# - command: kubectl wait --for=jsonpath='{.status.crVersion}'=${PSMDB_OPERATOR_VERSION} dbc/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait --for=jsonpath='{.spec.engine.version}'=${PSMDB_DB_ENGINE_VERSION} dbc/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait 
--for=jsonpath='{.spec.image}'="percona/percona-server-mongodb:${PSMDB_DB_ENGINE_VERSION}" psmdb/test-psmdb-cluster -n ${NAMESPACE} +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: db + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(db.metadata.finalizers)" + # message: "db doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in db.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in db.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in db.metadata.finalizers" + # message: "foregroundDeletion' is absent in db.metadata.finalizers" + + # - celExpr: "has(psmdb.metadata.finalizers)" + # message: "psmdb doesn't have finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pods-in-order' in psmdb.metadata.finalizers" + # message: "'percona.com/delete-psmdb-pods-in-order' is absent in psmdb.metadata.finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pvc' in psmdb.metadata.finalizers" + # message: "percona.com/delete-psmdb-pvc' is absent in psmdb.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + topology: + type: sharded + config: + numShards: 2 + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + proxy: + type: mongod + replicas: 3 + configServer: + 
type: mongod + replicas: 3 + storage: + size: 25Gi +# spec: +# engine: +# replicas: 3 +# resources: +# cpu: "1" +# memory: 4G +# storage: +# size: 25Gi +# type: psmdb +# userSecretsName: everest-secrets-test-psmdb-cluster +# proxy: +# expose: +# type: internal +status: + # status: ready + phase: Running +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + enableVolumeExpansion: true + imagePullPolicy: IfNotPresent + multiCluster: + enabled: false + pmm: + image: "" + resources: {} + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + type: ClusterIP + hidden: + enabled: false + resources: {} + size: 0 + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + hidden: + enabled: false + resources: {} + size: 0 + name: rs1 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + configsvrReplSet: + arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: false + hidden: + enabled: false + resources: {} + size: 0 + nonvoting: + enabled: 
false + resources: {} + size: 0 + resources: {} + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + enabled: true + mongos: + expose: + type: ClusterIP + resources: {} + size: 3 + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true +status: + state: ready + diff --git a/examples/psmdb/test/integration/sharded/11-cluster-ready.yaml b/examples/psmdb/test/integration/sharded/11-cluster-ready.yaml new file mode 100644 index 0000000..87a2917 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/11-cluster-ready.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + - command: kubectl patch psmdb/test-psmdb-cluster --subresource status -n $NAMESPACE -p '{"status":{"state":"ready", "size":1, "ready":1}}' --type=merge diff --git a/examples/psmdb/test/integration/sharded/20-assert.yaml b/examples/psmdb/test/integration/sharded/20-assert.yaml new file mode 100644 index 0000000..b0301ac --- /dev/null +++ b/examples/psmdb/test/integration/sharded/20-assert.yaml @@ -0,0 +1,273 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 10 +# timeout: 30 +collectors: + - command: kubectl get dbc/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + +# - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml +# - type: pod +# namespace: everest-system +# selector: control-plane=controller-manager +# tail: 100 +# commands: +# # TODO check psmdb.spec.CRVersion +# # TODO check psmdb.spec.backup.image +# # TODO check dbc.status.recommendedCRVersion +# - command: kubectl wait --for=jsonpath='{.status.crVersion}'=${PSMDB_OPERATOR_VERSION} dbc/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait --for=jsonpath='{.spec.engine.version}'=${PSMDB_DB_ENGINE_VERSION} dbc/test-psmdb-cluster -n ${NAMESPACE} +# - command: kubectl wait 
--for=jsonpath='{.spec.image}'="percona/percona-server-mongodb:${PSMDB_DB_ENGINE_VERSION}" psmdb/test-psmdb-cluster -n ${NAMESPACE} +resourceRefs: + - apiVersion: everest.percona.com/v2alpha1 + kind: DataStore + name: test-psmdb-cluster + ref: db + - apiVersion: psmdb.percona.com/v1 + kind: PerconaServerMongoDB + name: test-psmdb-cluster + ref: psmdb +assertAll: + # - celExpr: "has(db.metadata.finalizers)" + # message: "db doesn't have finalizers" + + # - celExpr: "'everest.percona.com/upstream-cluster-cleanup' in db.metadata.finalizers" + # message: "'everest.percona.com/upstream-cluster-cleanup' is absent in db.metadata.finalizers" + + # - celExpr: "'foregroundDeletion' in db.metadata.finalizers" + # message: "foregroundDeletion' is absent in db.metadata.finalizers" + + # - celExpr: "has(psmdb.metadata.finalizers)" + # message: "psmdb doesn't have finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pods-in-order' in psmdb.metadata.finalizers" + # message: "'percona.com/delete-psmdb-pods-in-order' is absent in psmdb.metadata.finalizers" + + # - celExpr: "'percona.com/delete-psmdb-pvc' in psmdb.metadata.finalizers" + # message: "percona.com/delete-psmdb-pvc' is absent in psmdb.metadata.finalizers" + + - celExpr: "!has(psmdb.spec.pmm.enabled)" + message: "psmdb.spec.pmm.enabled is not empty" + + - celExpr: "!has(psmdb.spec.pmm.serverHost)" + message: "psmdb.spec.pmm.serverHost is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.limits)" + message: "psmdb.spec.pmm.resources.limits is not empty" + + - celExpr: "!has(psmdb.spec.pmm.resources.requests)" + message: "psmdb.spec.pmm.resources.requests is not empty" +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + topology: + type: sharded + config: + numShards: 3 + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + proxy: + type: mongod + replicas: 3 + configServer: + 
type: mongod + replicas: 3 + storage: + size: 25Gi +# spec: +# engine: +# crVersion: 1.21.1 +# replicas: 3 +# resources: +# cpu: "1" +# memory: 4G +# storage: +# size: 25Gi +# type: psmdb +# proxy: +# expose: +# type: internal +# sharding: +# configServer: +# replicas: 3 +# enabled: true +# shards: 3 +status: + # status: ready + phase: Running +--- +apiVersion: psmdb.percona.com/v1 +kind: PerconaServerMongoDB +metadata: + name: test-psmdb-cluster +spec: + backup: + configuration: + backupOptions: + oplogSpanMin: 0 + timeouts: + startingStatus: 120 + enabled: true + pitr: {} + resources: + limits: + cpu: 300m + memory: 1G + enableVolumeExpansion: true + imagePullPolicy: IfNotPresent + multiCluster: + enabled: false + pmm: + image: "" + resources: {} + replsets: + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + type: ClusterIP + hidden: + enabled: false + resources: {} + size: 0 + name: rs0 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + type: ClusterIP + hidden: + enabled: false + resources: {} + size: 0 + name: rs1 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + - arbiter: + enabled: false + resources: {} + size: 0 + configuration: |2 + + operationProfiling: + mode: slowOp + expose: + enabled: false + type: ClusterIP + hidden: + enabled: false + resources: {} + size: 0 + name: rs2 + nonvoting: + enabled: false + resources: {} + size: 0 + podDisruptionBudget: + 
maxUnavailable: 1 + resources: + limits: + cpu: "1" + memory: 4G + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + secrets: + encryptionKey: test-psmdb-cluster-mongodb-encryption-key + sslInternal: test-psmdb-cluster-ssl-internal + users: everest-secrets-test-psmdb-cluster + sharding: + balancer: {} + configsvrReplSet: + arbiter: + enabled: false + resources: {} + size: 0 + expose: + enabled: false + hidden: + enabled: false + resources: {} + size: 0 + nonvoting: + enabled: false + resources: {} + size: 0 + resources: {} + size: 3 + volumeSpec: + persistentVolumeClaim: + resources: + requests: + storage: 25Gi + enabled: true + mongos: + expose: + type: ClusterIP + resources: {} + size: 3 + unsafeFlags: {} + updateStrategy: SmartUpdate + upgradeOptions: + apply: disabled + schedule: 0 4 * * * + setFCV: true +status: + state: ready + diff --git a/examples/psmdb/test/integration/sharded/20-update-cluster.yaml b/examples/psmdb/test/integration/sharded/20-update-cluster.yaml new file mode 100644 index 0000000..c5fbb72 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/20-update-cluster.yaml @@ -0,0 +1,50 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +--- +apiVersion: everest.percona.com/v2alpha1 +kind: DataStore +metadata: + name: test-psmdb-cluster +spec: + provider: psmdb + topology: + type: sharded + config: + numShards: 3 + components: + engine: + type: mongod + replicas: 3 + resources: + cpu: "1" + memory: 4G + storage: + size: 25Gi + proxy: + type: mongod + replicas: 3 + configServer: + type: mongod + replicas: 3 + storage: + size: 25Gi +# spec: +# engine: +# crVersion: 1.21.1 +# replicas: 3 +# resources: +# cpu: "1" +# memory: 4G +# storage: +# size: 25Gi +# type: psmdb +# proxy: +# expose: +# type: internal +# sharding: +# configServer: +# replicas: 3 +# enabled: true +# shards: 3 + diff --git a/examples/psmdb/test/integration/sharded/30-assert.yaml 
b/examples/psmdb/test/integration/sharded/30-assert.yaml new file mode 100644 index 0000000..1033ea6 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/30-assert.yaml @@ -0,0 +1,16 @@ +apiVersion: kuttl.dev/v1 +kind: TestAssert +timeout: 30 +collectors: + - command: kubectl get dst/test-psmdb-cluster -n ${NAMESPACE} -o yaml + - command: kubectl get psmdb/test-psmdb-cluster -n ${NAMESPACE} -o yaml + + # - command: kubectl get deploy/everest-controller-manager -n everest-system -o yaml + # - type: pod + # namespace: everest-system + # selector: control-plane=controller-manager + # tail: 100 +commands: + - command: kubectl wait --for=delete dst/test-psmdb-cluster -n $NAMESPACE + - command: kubectl wait --for=delete psmdb/test-psmdb-cluster -n $NAMESPACE + diff --git a/examples/psmdb/test/integration/sharded/30-delete-cluster.yaml b/examples/psmdb/test/integration/sharded/30-delete-cluster.yaml new file mode 100644 index 0000000..13a7519 --- /dev/null +++ b/examples/psmdb/test/integration/sharded/30-delete-cluster.yaml @@ -0,0 +1,8 @@ +apiVersion: kuttl.dev/v1 +kind: TestStep +timeout: 10 +commands: + - script: kubectl -n $NAMESPACE delete dst/test-psmdb-cluster psmdb/test-psmdb-cluster --wait=false && sleep 5 + # - command: kubectl patch db/test-psmdb-cluster -n $NAMESPACE -p '{"metadata":{"finalizers":null}}' --type merge + # - command: kubectl patch psmdb/test-psmdb-cluster -n $NAMESPACE -p '{"metadata":{"finalizers":null}}' --type merge + diff --git a/examples/psmdb/test/vars.sh b/examples/psmdb/test/vars.sh new file mode 100755 index 0000000..5a6eec3 --- /dev/null +++ b/examples/psmdb/test/vars.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +## ===== General environment variables for the Percona Operator tests ===== +export OPERATOR_ROOT_PATH=${OPERATOR_ROOT_PATH:-${PWD}} +echo "OPERATOR_ROOT_PATH=${OPERATOR_ROOT_PATH}" + +export PERCONA_VERSION_SERVICE_URL=${PERCONA_VERSION_SERVICE_URL:-"https://check-dev.percona.com/versions/v1"} +echo 
"PERCONA_VERSION_SERVICE_URL=${PERCONA_VERSION_SERVICE_URL}" + +## ======= Upstream DB operators params for testing =============== +export PXC_OPERATOR_VERSION=${PXC_OPERATOR_VERSION:-"1.18.0"} +echo "PXC_OPERATOR_VERSION=${PXC_OPERATOR_VERSION}" + +export PXC_DB_ENGINE_VERSION=${PXC_DB_ENGINE_VERSION:-"8.0.42-33.1"} +echo "PXC_DB_ENGINE_VERSION=${PXC_DB_ENGINE_VERSION}" + +# Recommended DB engine version available in PREVIOUS_PXC_OPERATOR_VERSION +export PREVIOUS_PXC_DB_ENGINE_VERSION=${PREVIOUS_PXC_DB_ENGINE_VERSION:-"8.0.41-32.1"} +echo "PREVIOUS_PXC_DB_ENGINE_VERSION=${PREVIOUS_PXC_DB_ENGINE_VERSION}" + +export PSMDB_OPERATOR_VERSION=${PSMDB_OPERATOR_VERSION:-"1.21.1"} +echo "PSMDB_OPERATOR_VERSION=${PSMDB_OPERATOR_VERSION}" + +export PSMDB_DB_ENGINE_VERSION=${PSMDB_DB_ENGINE_VERSION:-"8.0.12-4"} +echo "PSMDB_DB_ENGINE_VERSION=${PSMDB_DB_ENGINE_VERSION}" + +# Recommended DB engine version available in PREVIOUS_PSMDB_OPERATOR_VERSION +export PREVIOUS_PSMDB_DB_ENGINE_VERSION=${PREVIOUS_PSMDB_DB_ENGINE_VERSION:-"7.0.15-9"} +echo "PREVIOUS_PSMDB_DB_ENGINE_VERSION=${PREVIOUS_PSMDB_DB_ENGINE_VERSION}" + +export PG_OPERATOR_VERSION=${PG_OPERATOR_VERSION:-"2.8.2"} +echo "PG_OPERATOR_VERSION=${PG_OPERATOR_VERSION}" + +export PG_DB_ENGINE_VERSION=${PG_DB_ENGINE_VERSION:-"17.7"} +echo "PG_DB_ENGINE_VERSION=${PG_DB_ENGINE_VERSION}" + +# Recommended DB engine version available in PREVIOUS_PG_OPERATOR_VERSION +export PREVIOUS_PG_DB_ENGINE_VERSION=${PREVIOUS_PG_DB_ENGINE_VERSION:-"17.5.2"} +echo "PREVIOUS_PG_DB_ENGINE_VERSION=${PREVIOUS_PG_DB_ENGINE_VERSION}" + +# Previous versions of the operators for testing upstream DB operators upgrades. 
+export PREVIOUS_PG_OPERATOR_VERSION=${PREVIOUS_PG_OPERATOR_VERSION:-"2.7.0"} +echo "PREVIOUS_PG_OPERATOR_VERSION=${PREVIOUS_PG_OPERATOR_VERSION}" + +export PREVIOUS_PXC_OPERATOR_VERSION=${PREVIOUS_PXC_OPERATOR_VERSION:-"1.17.0"} +echo "PREVIOUS_PXC_OPERATOR_VERSION=${PREVIOUS_PXC_OPERATOR_VERSION}" + +export PREVIOUS_PSMDB_OPERATOR_VERSION=${PREVIOUS_PSMDB_OPERATOR_VERSION:-"1.19.1"} +echo "PREVIOUS_PSMDB_OPERATOR_VERSION=${PREVIOUS_PSMDB_OPERATOR_VERSION}" + +## ============== K3D cluster configuration =================== +# export KUBECONFIG="${KUBECONFIG:-${OPERATOR_ROOT_PATH}/test/kubeconfig}" +# echo "KUBECONFIG=${KUBECONFIG}" + diff --git a/examples/psmdb/types/generated/zz_generated.openapi.go b/examples/psmdb/types/generated/zz_generated.openapi.go new file mode 100644 index 0000000..57b6f9a --- /dev/null +++ b/examples/psmdb/types/generated/zz_generated.openapi.go @@ -0,0 +1,30 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by openapi-gen. DO NOT EDIT. 
+ +package generated + +import ( + common "k8s.io/kube-openapi/pkg/common" +) + +func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + return map[string]common.OpenAPIDefinition{} +} diff --git a/examples/psmdb/types/types.go b/examples/psmdb/types/types.go new file mode 100644 index 0000000..66f738e --- /dev/null +++ b/examples/psmdb/types/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types contains custom spec types for the PSMDB (Percona Server MongoDB) provider. +// These types are annotated with k8s:validation markers for OpenAPI schema generation. +// +// To regenerate OpenAPI schemas after modifying these types: +// +// make generate-openapi +// +// The generated code will be placed in examples/psmdb/types/generated/zz_generated.openapi.go +// +// +k8s:openapi-gen=true +package types + +// ============================================================================= +// MONGOD COMPONENT SPEC +// ============================================================================= + +// MongodCustomSpec defines custom configuration for mongod components. +// This struct is converted to OpenAPI schema and served via the /schema endpoint. +// Provider users can specify these fields in the DataStore's component CustomSpec. 
+type MongodCustomSpec struct{} + +// ============================================================================= +// MONGOS COMPONENT SPEC +// ============================================================================= + +// MongosCustomSpec defines custom configuration for mongos (proxy) components. +type MongosCustomSpec struct{} + +// ============================================================================= +// PMM (MONITORING) COMPONENT SPEC +// ============================================================================= + +// PMMCustomSpec defines custom configuration for PMM monitoring. +type PMMCustomSpec struct{} + +// ============================================================================= +// BACKUP COMPONENT SPEC +// ============================================================================= + +// BackupCustomSpec defines custom configuration for backup agents. +type BackupCustomSpec struct{} + +// ============================================================================= +// TOPOLOGY SPECS +// ============================================================================= + +// TopologyType defines the type of deployment topology. +type TopologyType string + +const ( + // TopologyTypeReplicaSet represents a replica set topology. + TopologyTypeReplicaSet TopologyType = "replicaSet" + // TopologyTypeSharded represents a sharded cluster topology. + TopologyTypeSharded TopologyType = "sharded" +) + +// ReplicaSetTopologyConfig defines configuration for replica set topology. +type ReplicaSetTopologyConfig struct { +} + +// ShardedTopologyConfig defines configuration for sharded cluster topology. +type ShardedTopologyConfig struct { + // NumShards specifies the initial number of shards. 
+ // +k8s:validation:minimum=1 + // +default=2 + // +optional + NumShards int32 `json:"numShards,omitempty"` +} + +// ============================================================================= +// GLOBAL CONFIG +// ============================================================================= + +// GlobalConfig defines global configuration that applies to the entire cluster. +type GlobalConfig struct{} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..d8d394c --- /dev/null +++ b/go.mod @@ -0,0 +1,108 @@ +module github.com/openeverest/provider-sdk + +go 1.25.5 + +require ( + github.com/AlekSi/pointer v1.2.0 + github.com/getkin/kin-openapi v0.133.0 + github.com/percona/percona-server-mongodb-operator v1.21.1 + k8s.io/api v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 + sigs.k8s.io/controller-runtime v0.22.4 + sigs.k8s.io/yaml v1.6.0 +) + +require ( + github.com/aws/aws-sdk-go-v2 v1.39.0 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cert-manager/cert-manager v1.18.2 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.2 // indirect + github.com/go-openapi/swag v0.25.1 // indirect + github.com/go-openapi/swag/cmdutils v0.25.1 // indirect + github.com/go-openapi/swag/conv v0.25.1 // indirect + github.com/go-openapi/swag/fileutils v0.25.1 // indirect + github.com/go-openapi/swag/jsonname v0.25.1 // indirect + github.com/go-openapi/swag/jsonutils v0.25.1 // 
indirect + github.com/go-openapi/swag/loading v0.25.1 // indirect + github.com/go-openapi/swag/mangling v0.25.1 // indirect + github.com/go-openapi/swag/netutils v0.25.1 // indirect + github.com/go-openapi/swag/stringutils v0.25.1 // indirect + github.com/go-openapi/swag/typeutils v0.25.1 // indirect + github.com/go-openapi/swag/yamlutils v0.25.1 // indirect + github.com/golang/snappy v1.0.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect + github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect + github.com/percona/percona-backup-mongodb v1.8.1-0.20250925114718-2f499a6a31c8 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect 
+ github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/woodsbury/decimal128 v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect + go.mongodb.org/mongo-driver v1.17.4 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.31.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.13.0 // indirect + golang.org/x/tools v0.39.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/apiextensions-apiserver v0.35.0 // indirect + k8s.io/client-go v0.35.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + sigs.k8s.io/gateway-api v1.1.0 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/mcs-api v0.3.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..fe83628 --- /dev/null +++ b/go.sum @@ -0,0 +1,275 @@ +github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= +github.com/AlekSi/pointer 
v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4= +github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cert-manager/cert-manager v1.18.2 h1:H2P75ycGcTMauV3gvpkDqLdS3RSXonWF2S49QGA1PZE= +github.com/cert-manager/cert-manager v1.18.2/go.mod h1:icDJx4kG9BCNpGjBvrmsFd99d+lXUvWdkkcrSSQdIiw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch 
v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= +github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU= +github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ= +github.com/go-openapi/swag v0.25.1 h1:6uwVsx+/OuvFVPqfQmOOPsqTcm5/GkBhNwLqIR916n8= +github.com/go-openapi/swag v0.25.1/go.mod h1:bzONdGlT0fkStgGPd3bhZf1MnuPkf2YAys6h+jZipOo= 
+github.com/go-openapi/swag/cmdutils v0.25.1 h1:nDke3nAFDArAa631aitksFGj2omusks88GF1VwdYqPY= +github.com/go-openapi/swag/cmdutils v0.25.1/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.1 h1:+9o8YUg6QuqqBM5X6rYL/p1dpWeZRhoIt9x7CCP+he0= +github.com/go-openapi/swag/conv v0.25.1/go.mod h1:Z1mFEGPfyIKPu0806khI3zF+/EUXde+fdeksUl2NiDs= +github.com/go-openapi/swag/fileutils v0.25.1 h1:rSRXapjQequt7kqalKXdcpIegIShhTPXx7yw0kek2uU= +github.com/go-openapi/swag/fileutils v0.25.1/go.mod h1:+NXtt5xNZZqmpIpjqcujqojGFek9/w55b3ecmOdtg8M= +github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU= +github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo= +github.com/go-openapi/swag/jsonutils v0.25.1 h1:AihLHaD0brrkJoMqEZOBNzTLnk81Kg9cWr+SPtxtgl8= +github.com/go-openapi/swag/jsonutils v0.25.1/go.mod h1:JpEkAjxQXpiaHmRO04N1zE4qbUEg3b7Udll7AMGTNOo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1 h1:DSQGcdB6G0N9c/KhtpYc71PzzGEIc/fZ1no35x4/XBY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1/go.mod h1:kjmweouyPwRUEYMSrbAidoLMGeJ5p6zdHi9BgZiqmsg= +github.com/go-openapi/swag/loading v0.25.1 h1:6OruqzjWoJyanZOim58iG2vj934TysYVptyaoXS24kw= +github.com/go-openapi/swag/loading v0.25.1/go.mod h1:xoIe2EG32NOYYbqxvXgPzne989bWvSNoWoyQVWEZicc= +github.com/go-openapi/swag/mangling v0.25.1 h1:XzILnLzhZPZNtmxKaz/2xIGPQsBsvmCjrJOWGNz/ync= +github.com/go-openapi/swag/mangling v0.25.1/go.mod h1:CdiMQ6pnfAgyQGSOIYnZkXvqhnnwOn997uXZMAd/7mQ= +github.com/go-openapi/swag/netutils v0.25.1 h1:2wFLYahe40tDUHfKT1GRC4rfa5T1B4GWZ+msEFA4Fl4= +github.com/go-openapi/swag/netutils v0.25.1/go.mod h1:CAkkvqnUJX8NV96tNhEQvKz8SQo2KF0f7LleiJwIeRE= +github.com/go-openapi/swag/stringutils v0.25.1 h1:Xasqgjvk30eUe8VKdmyzKtjkVjeiXx1Iz0zDfMNpPbw= +github.com/go-openapi/swag/stringutils v0.25.1/go.mod h1:JLdSAq5169HaiDUbTvArA2yQxmgn4D6h4A+4HqVvAYg= +github.com/go-openapi/swag/typeutils 
v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3I3ysiFZqukA= +github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8= +github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk= +github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod 
h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= +github.com/oasdiff/yaml3 
v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/percona/percona-backup-mongodb v1.8.1-0.20250925114718-2f499a6a31c8 h1:iHJ1QTVTLgYCR5Jr9b99cBiXt4Flp+S37cBp5soIqy0= +github.com/percona/percona-backup-mongodb v1.8.1-0.20250925114718-2f499a6a31c8/go.mod h1:ji37sYcYJM4XWpcyqxmwIS9knw4vLNVSJF8LHye8HPE= +github.com/percona/percona-server-mongodb-operator v1.21.1 h1:ven97PFWl28Ot0pgFQ82x0nyjuuueFrQRbGI+cFb7E4= +github.com/percona/percona-server-mongodb-operator v1.21.1/go.mod h1:SOrR+TGJGSmF3lX/z4FwgUAzjTnqg22y5NoIKWXNXgw= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= 
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIjVWss0= +github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xdg-go/pbkdf2 v1.0.0 
h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= +go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= 
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi 
v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= +sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/mcs-api v0.3.0 h1:LjRvgzjMrvO1904GP6XBJSnIX221DJMyQlZOYt9LAnM= +sigs.k8s.io/mcs-api v0.3.0/go.mod h1:zZ5CK8uS6HaLkxY4HqsmcBHfzHuNMrY2uJy8T7jffK4= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..06a460e --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/pkg/apis/v2alpha1/datastore_types.go b/pkg/apis/v2alpha1/datastore_types.go new file mode 100644 index 0000000..2375608 --- /dev/null +++ b/pkg/apis/v2alpha1/datastore_types.go @@ -0,0 +1,183 @@ +package v2alpha1 + +import ( + "encoding/json" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=dst;dstore +type DataStore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataStoreSpec `json:"spec,omitempty"` + Status DataStoreStatus `json:"status,omitempty"` +} + +// TopologySpec defines the deployment topology and its configuration. +type TopologySpec struct { + // Type is the topology name (e.g., "sharded", "replicaset"). + // The available topologies are defined by the provider. + // If omitted, the provider's default topology is used. + // +optional + Type string `json:"type,omitempty"` + + // Config contains topology-specific configuration. + // The schema for this field is defined by the provider's TopologyDefinition. + // Examples: shard count for sharded topology, replication factor, etc. + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + Config *runtime.RawExtension `json:"config,omitempty"` +} + +type DataStoreSpec struct { + // Provider is the name of the database provider (e.g., "psmdb", "postgresql"). 
+ Provider string `json:"provider,omitempty"` + + // Topology defines the deployment topology and its configuration. + // +optional + Topology *TopologySpec `json:"topology,omitempty"` + + // Global contains provider-level configuration that applies to the entire cluster. + // The schema for this field is defined by the provider's GlobalSchema. + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + Global *runtime.RawExtension `json:"global,omitempty"` + + // Components defines the component instances for this cluster. + // The keys are component names (e.g., "engine", "proxy", "backupAgent"). + // Which components are valid depends on the selected topology. + Components map[string]ComponentSpec `json:"components,omitempty"` +} + +// GetComponentsOfType returns all components that match the given type. +func (ds *DataStore) GetComponentsOfType(t string) []ComponentSpec { + var result []ComponentSpec + for _, c := range ds.Spec.Components { + if c.Type == t { + result = append(result, c) + } + } + return result +} + +// GetTopologyType returns the topology type, or empty string if not specified. +func (ds *DataStore) GetTopologyType() string { + if ds.Spec.Topology == nil { + return "" + } + return ds.Spec.Topology.Type +} + +// GetTopologyConfig returns the topology configuration as runtime.RawExtension. +// Returns nil if no topology or topology config is specified. +func (ds *DataStore) GetTopologyConfig() *runtime.RawExtension { + if ds.Spec.Topology == nil { + return nil + } + return ds.Spec.Topology.Config +} + +type DataStorePhase string + +const ( + DataStorePhaseCreating DataStorePhase = "Creating" + DataStorePhaseRunning DataStorePhase = "Running" + DataStorePhaseFailed DataStorePhase = "Failed" + DataStorePhaseDeleting DataStorePhase = "Deleting" +) + +type DataStoreStatus struct { + // Phase of the database cluster. + Phase DataStorePhase `json:"phase,omitempty"` + // ConnectionURL is the URL to connect to the database cluster. 
+	ConnectionURL string `json:"connectionURL,omitempty"` +	// CredentialSecretRef is a reference to the secret containing the credentials. +	// This Secret contains the keys `username` and `password`. +	CredentialSecretRef corev1.LocalObjectReference `json:"credentialSecretRef,omitempty"` +	// Components is the status of the components in the database cluster. +	Components []ComponentStatus `json:"components,omitempty"` +	// TODO: more fields +} + +const ( +	StateReady      = "Ready" +	StateInProgress = "InProgress" +	StateError      = "Error" +) + +type ComponentStatus struct { +	Pods  []corev1.LocalObjectReference `json:"pods,omitempty"` +	Total *int32                        `json:"total,omitempty"` +	Ready *int32                        `json:"ready,omitempty"` +	State string                        `json:"state,omitempty"` +} + +type CustomOptions map[string]json.RawMessage + +type ComponentSpec struct { +	// Name of the component. +	Name string `json:"name,omitempty"` +	// Type of the component from the Provider. +	Type string `json:"type,omitempty"` +	// Version of the component from ComponentVersions. +	Version string `json:"version,omitempty"` +	// Image specifies an override for the image to use. +	// When unspecified, it is automatically set from the ComponentVersions +	// based on the Version specified. +	// +optional +	Image string `json:"image,omitempty"` +	// Storage requirements for this component. +	// For stateless components, this is an optional field. +	// +optional +	Storage *Storage `json:"storage,omitempty"` +	// Resource requirements for this component. +	// +optional +	Resources *Resources `json:"resources,omitempty"` +	// Config specifies the component-specific configuration. +	// +optional +	Config *Config `json:"config,omitempty"` +	// Replicas specifies the number of replicas for this component. +	// +optional +	Replicas *int32 `json:"replicas,omitempty"` +	// +kubebuilder:pruning:PreserveUnknownFields +	// CustomSpec provides an API for customising this component. 
+ // The API schema is defined by the provider's ComponentSchemas. + CustomSpec *runtime.RawExtension `json:"customSpec,omitempty"` +} + +type Config struct { + SecretRef corev1.LocalObjectReference `json:"secretRef,omitempty"` + ConfigMapRef corev1.LocalObjectReference `json:"configMapRef,omitempty"` + Key string `json:"key,omitempty"` +} + +type Storage struct { + Size resource.Quantity `json:"size,omitempty"` + StorageClass *string `json:"storageClass,omitempty"` +} + +// FIXME: consider adding Requests and Limits +type Resources struct { + CPU resource.Quantity `json:"cpu,omitempty"` + Memory resource.Quantity `json:"memory,omitempty"` +} + +//+kubebuilder:object:root=true + +// DataStoreList contains a list of DataStore. +type DataStoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataStore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DataStore{}, &DataStoreList{}) +} diff --git a/pkg/apis/v2alpha1/groupversion_info.go b/pkg/apis/v2alpha1/groupversion_info.go new file mode 100644 index 0000000..ee05879 --- /dev/null +++ b/pkg/apis/v2alpha1/groupversion_info.go @@ -0,0 +1,19 @@ +// +kubebuilder:object:generate=true +// +groupName=everest.percona.com +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "everest.percona.com", Version: "v2alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/apis/v2alpha1/provider_types.go b/pkg/apis/v2alpha1/provider_types.go new file mode 100644 index 0000000..ce1fa96 --- /dev/null +++ b/pkg/apis/v2alpha1/provider_types.go @@ -0,0 +1,60 @@ +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=prv;prov +type Provider struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ProviderSpec `json:"spec,omitempty"` + Status ProviderStatus `json:"status,omitempty"` +} + +type ProviderSpec struct { + ComponentTypes map[string]ComponentType `json:"componentTypes,omitempty"` + Components map[string]Component `json:"components,omitempty"` + Topologies map[string]Topology `json:"topologies,omitempty"` +} + +type ComponentType struct { + Versions []ComponentVersion `json:"versions,omitempty"` +} + +type ComponentVersion struct { + Version string `json:"version,omitempty"` + Image string `json:"image,omitempty"` + Default bool `json:"default,omitempty"` +} + +type Component struct { + Type string `json:"type,omitempty"` +} + +type Topology struct { + Components map[string]TopologyComponent `json:"components,omitempty"` +} + +type TopologyComponent struct { + Optional bool `json:"optional,omitempty"` + //Defaults map[string]interface{} `json:"defaults,omitempty"` +} + +type ProviderStatus struct{} + +// ProviderList contains a list of Provider. 
+// +// +kubebuilder:object:root=true +type ProviderList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Provider `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Provider{}, &ProviderList{}) +} diff --git a/pkg/apis/v2alpha1/zz_generated.deepcopy.go b/pkg/apis/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a7fd5a6 --- /dev/null +++ b/pkg/apis/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,497 @@ +//go:build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "encoding/json" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Component) DeepCopyInto(out *Component) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Component. +func (in *Component) DeepCopy() *Component { + if in == nil { + return nil + } + out := new(Component) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(Storage) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(Resources) + (*in).DeepCopyInto(*out) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(Config) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.CustomSpec != nil { + in, out := &in.CustomSpec, &out.CustomSpec + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentSpec. +func (in *ComponentSpec) DeepCopy() *ComponentSpec { + if in == nil { + return nil + } + out := new(ComponentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Total != nil { + in, out := &in.Total, &out.Total + *out = new(int32) + **out = **in + } + if in.Ready != nil { + in, out := &in.Ready, &out.Ready + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentType) DeepCopyInto(out *ComponentType) { + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]ComponentVersion, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentType. +func (in *ComponentType) DeepCopy() *ComponentType { + if in == nil { + return nil + } + out := new(ComponentType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentVersion) DeepCopyInto(out *ComponentVersion) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentVersion. +func (in *ComponentVersion) DeepCopy() *ComponentVersion { + if in == nil { + return nil + } + out := new(ComponentVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.SecretRef = in.SecretRef + out.ConfigMapRef = in.ConfigMapRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in CustomOptions) DeepCopyInto(out *CustomOptions) { + { + in := &in + *out = make(CustomOptions, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOptions. +func (in CustomOptions) DeepCopy() CustomOptions { + if in == nil { + return nil + } + out := new(CustomOptions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStore) DeepCopyInto(out *DataStore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStore. +func (in *DataStore) DeepCopy() *DataStore { + if in == nil { + return nil + } + out := new(DataStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataStore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStoreList) DeepCopyInto(out *DataStoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataStore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreList. 
+func (in *DataStoreList) DeepCopy() *DataStoreList { + if in == nil { + return nil + } + out := new(DataStoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataStoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStoreSpec) DeepCopyInto(out *DataStoreSpec) { + *out = *in + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(TopologySpec) + (*in).DeepCopyInto(*out) + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make(map[string]ComponentSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreSpec. +func (in *DataStoreSpec) DeepCopy() *DataStoreSpec { + if in == nil { + return nil + } + out := new(DataStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataStoreStatus) DeepCopyInto(out *DataStoreStatus) { + *out = *in + out.CredentialSecretRef = in.CredentialSecretRef + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreStatus. 
+func (in *DataStoreStatus) DeepCopy() *DataStoreStatus { + if in == nil { + return nil + } + out := new(DataStoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Provider) DeepCopyInto(out *Provider) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provider. +func (in *Provider) DeepCopy() *Provider { + if in == nil { + return nil + } + out := new(Provider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Provider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderList) DeepCopyInto(out *ProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Provider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderList. +func (in *ProviderList) DeepCopy() *ProviderList { + if in == nil { + return nil + } + out := new(ProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { + *out = *in + if in.ComponentTypes != nil { + in, out := &in.ComponentTypes, &out.ComponentTypes + *out = make(map[string]ComponentType, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make(map[string]Component, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Topologies != nil { + in, out := &in.Topologies, &out.Topologies + *out = make(map[string]Topology, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. +func (in *ProviderSpec) DeepCopy() *ProviderSpec { + if in == nil { + return nil + } + out := new(ProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderStatus) DeepCopyInto(out *ProviderStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderStatus. +func (in *ProviderStatus) DeepCopy() *ProviderStatus { + if in == nil { + return nil + } + out := new(ProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resources) DeepCopyInto(out *Resources) { + *out = *in + out.CPU = in.CPU.DeepCopy() + out.Memory = in.Memory.DeepCopy() +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. +func (in *Resources) DeepCopy() *Resources { + if in == nil { + return nil + } + out := new(Resources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.Size = in.Size.DeepCopy() + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Topology) DeepCopyInto(out *Topology) { + *out = *in + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make(map[string]TopologyComponent, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology. +func (in *Topology) DeepCopy() *Topology { + if in == nil { + return nil + } + out := new(Topology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologyComponent) DeepCopyInto(out *TopologyComponent) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologyComponent. +func (in *TopologyComponent) DeepCopy() *TopologyComponent { + if in == nil { + return nil + } + out := new(TopologyComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpec) DeepCopyInto(out *TopologySpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpec. 
+func (in *TopologySpec) DeepCopy() *TopologySpec { +	if in == nil { +		return nil +	} +	out := new(TopologySpec) +	in.DeepCopyInto(out) +	return out +} diff --git a/pkg/controller/README.md b/pkg/controller/README.md new file mode 100644 index 0000000..370df66 --- /dev/null +++ b/pkg/controller/README.md @@ -0,0 +1,82 @@ +# SDK Controller Package + +This package contains the core SDK abstractions for building Everest providers. + +## Key Files + +| File | Purpose | +|------|---------| +| `common.go` | The `Context` handle and resource operations | +| `interface.go` | Provider interface types (`ProviderInterface`, `BaseProvider`) | +| `metadata.go` | Provider metadata types and conversions | +| `generate.go` | CLI manifest generation utilities | + +## Main Concepts + +### The Context Handle (`common.go`) + +The `Context` struct is the main interface for provider code: + +```go +type Context struct { +    ctx      context.Context +    client   client.Client +    ds       *v2alpha1.DataStore +    metadata *ProviderMetadata +} + +// Key methods: +c.Name()       // DataStore name +c.Namespace()  // DataStore namespace +c.Spec()       // DataStore spec +c.Apply(obj)   // Create/update with owner reference +c.Get(obj, name) // Read resource +c.Delete(obj)  // Delete resource +c.Metadata()   // Provider metadata +``` + +## Provider Interface + +Implement the `ProviderInterface` to create a provider: + +```go +type ProviderInterface interface { +    Name() string +    Types() func(*runtime.Scheme) error +    OwnedTypes() []client.Object +    Validate(c *Context) error +    Sync(c *Context) error +    Status(c *Context) (Status, error) +    Cleanup(c *Context) error +} +``` + +Use `BaseProvider` to inherit default implementations: + +```go +type MyProvider struct { +    sdk.BaseProvider +} + +func NewMyProvider() *MyProvider { +    return &MyProvider{ +        BaseProvider: sdk.BaseProvider{ +            ProviderName: "mydb", +            SchemeFuncs:  []func(*runtime.Scheme) error{mydbv1.AddToScheme}, +            Owned:        []client.Object{&mydbv1.MyDB{}}, +        }, +    } +} + +// Implement 
required methods +func (p *MyProvider) Validate(c *sdk.Context) error { ... } +func (p *MyProvider) Sync(c *sdk.Context) error { ... } +func (p *MyProvider) Status(c *sdk.Context) (sdk.Status, error) { ... } +func (p *MyProvider) Cleanup(c *sdk.Context) error { ... } +``` + +## See Also + +- [SDK Overview](../../docs/SDK_OVERVIEW.md) +- [Examples](../../examples/README.md) + diff --git a/pkg/controller/common.go b/pkg/controller/common.go new file mode 100644 index 0000000..6f85aa0 --- /dev/null +++ b/pkg/controller/common.go @@ -0,0 +1,340 @@ +package controller + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/openeverest/provider-sdk/pkg/apis/v2alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// ============================================================================= +// CORE ABSTRACTION: The Context handle +// ============================================================================= + +// Context is the main handle for working with a DataStore. +// It provides a simplified interface that hides Kubernetes complexity. +type Context struct { + ctx context.Context + client client.Client + ds *v2alpha1.DataStore + metadata *ProviderMetadata +} + +// NewContext creates a new Context handle (used internally by the reconciler). +func NewContext(ctx context.Context, c client.Client, ds *v2alpha1.DataStore) *Context { + return &Context{ctx: ctx, client: c, ds: ds} +} + +// NewContextWithMetadata creates a new Context handle with provider metadata. +// This is preferred over NewContext as it makes metadata available to provider implementations. +func NewContextWithMetadata(ctx context.Context, c client.Client, ds *v2alpha1.DataStore, metadata *ProviderMetadata) *Context { + return &Context{ctx: ctx, client: c, ds: ds, metadata: metadata} +} + +// Spec returns the datastore specification. 
+func (c *Context) Spec() *v2alpha1.DataStoreSpec { + return &c.ds.Spec +} + +// Name returns the datastore name. +func (c *Context) Name() string { + return c.ds.Name +} + +// Namespace returns the datastore namespace. +func (c *Context) Namespace() string { + return c.ds.Namespace +} + +// Labels returns the datastore labels. +func (c *Context) Labels() map[string]string { + return c.ds.Labels +} + +// Annotations returns the datastore annotations. +func (c *Context) Annotations() map[string]string { + return c.ds.Annotations +} + +// ComponentsOfType returns all components of a given type. +func (c *Context) ComponentsOfType(componentType string) []v2alpha1.ComponentSpec { + return c.ds.GetComponentsOfType(componentType) +} + +// DB returns the underlying DataStore for direct access. +func (c *Context) DB() *v2alpha1.DataStore { + return c.ds +} + +// Metadata returns the provider metadata, if available. +// Returns nil if metadata was not provided when creating the Context handle. +// The metadata is automatically populated by the reconciler if the provider +// implements the MetadataProvider interface. +func (c *Context) Metadata() *ProviderMetadata { + return c.metadata +} + +// Raw returns the underlying DataStore (escape hatch for advanced use). +// Deprecated: Use DB() instead. +func (c *Context) Raw() *v2alpha1.DataStore { + return c.ds +} + +// ============================================================================= +// RESOURCE OPERATIONS +// ============================================================================= + +// Apply creates or updates a resource, setting ownership automatically. +// This is the primary way to manage resources - just describe what you want. 
+// Apply creates or updates obj in the cluster, first setting the DataStore
+// as its controller owner reference (so the object is garbage-collected with
+// the DataStore and changes to it can trigger reconciliation).
+//
+// Semantics: if the object does not exist it is created; otherwise the
+// caller-supplied obj fully replaces the stored object via Update
+// (last-write-wins). NOTE(review): there is no retry on update conflicts,
+// and obj is mutated (its resourceVersion is overwritten below) — confirm
+// callers do not rely on obj being untouched after Apply.
+func (c *Context) Apply(obj client.Object) error {
+	// Set the owner reference automatically
+	if err := controllerutil.SetControllerReference(c.ds, obj, c.client.Scheme()); err != nil {
+		return fmt.Errorf("failed to set owner: %w", err)
+	}
+
+	// Use create-or-update semantics
+	existing := obj.DeepCopyObject().(client.Object)
+	err := c.client.Get(c.ctx, client.ObjectKeyFromObject(obj), existing)
+	if err != nil {
+		if client.IgnoreNotFound(err) != nil {
+			// A real read error (anything other than "not found") — surface it.
+			return err
+		}
+		// Doesn't exist, create it
+		return c.client.Create(c.ctx, obj)
+	}
+	// Exists, update it
+	// Copy the live resourceVersion so the Update passes the API server's
+	// optimistic-concurrency check.
+	obj.SetResourceVersion(existing.GetResourceVersion())
+	return c.client.Update(c.ctx, obj)
+}
+
+// Get retrieves a resource by name (in the datastore's namespace).
+// obj must be a pointer of the expected type; it is populated on success.
+func (c *Context) Get(obj client.Object, name string) error {
+	return c.client.Get(c.ctx, client.ObjectKey{
+		Namespace: c.ds.Namespace,
+		Name:      name,
+	}, obj)
+}
+
+// Exists checks if a resource exists.
+// Returns (false, nil) when the object is absent and (false, err) only for
+// real read errors. On success obj is populated as a side effect.
+func (c *Context) Exists(obj client.Object, name string) (bool, error) {
+	err := c.Get(obj, name)
+	if err != nil {
+		if client.IgnoreNotFound(err) != nil {
+			return false, err
+		}
+		return false, nil
+	}
+	return true, nil
+}
+
+// Delete removes a resource.
+// "Not found" errors are swallowed, so Delete is idempotent.
+func (c *Context) Delete(obj client.Object) error {
+	err := c.client.Delete(c.ctx, obj)
+	return client.IgnoreNotFound(err)
+}
+
+// List retrieves resources matching optional filters.
+// Results are always scoped to the datastore's namespace; opts are appended
+// after the namespace filter and may restrict further (e.g. label selectors).
+func (c *Context) List(list client.ObjectList, opts ...client.ListOption) error {
+	allOpts := append([]client.ListOption{client.InNamespace(c.ds.Namespace)}, opts...)
+	return c.client.List(c.ctx, list, allOpts...)
+}
+
+// =============================================================================
+// HELPER METHODS
+// =============================================================================
+
+// ObjectMeta returns a pre-configured ObjectMeta for creating resources. 
+func (c *Context) ObjectMeta(name string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: name, + Namespace: c.Namespace(), + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "everest", + "app.kubernetes.io/instance": c.Name(), + }, + } +} + +// DecodeTopologyConfig unmarshals the topology configuration into the provided struct. +// The target should be a pointer to the expected config type. +// Returns an error if the config is nil, empty, or unmarshaling fails. +// +// Example: +// +// var config psmdbspec.ShardedTopologyConfig +// if err := c.DecodeTopologyConfig(&config); err != nil { +// // handle error or use defaults +// } +func (c *Context) DecodeTopologyConfig(target interface{}) error { + topologyConfig := c.ds.GetTopologyConfig() + if topologyConfig == nil || topologyConfig.Raw == nil { + return fmt.Errorf("topology config not set") + } + return json.Unmarshal(topologyConfig.Raw, target) +} + +// DecodeGlobalConfig unmarshals the global configuration into the provided struct. +// The target should be a pointer to the expected config type. +// Returns an error if the config is nil, empty, or unmarshaling fails. +// +// Example: +// +// var config psmdbspec.GlobalConfig +// if err := c.DecodeGlobalConfig(&config); err != nil { +// // handle error or use defaults +// } +func (c *Context) DecodeGlobalConfig(target interface{}) error { + globalConfig := c.ds.Spec.Global + if globalConfig == nil || globalConfig.Raw == nil { + return fmt.Errorf("global config not set") + } + return json.Unmarshal(globalConfig.Raw, target) +} + +// DecodeComponentCustomSpec unmarshals a component's custom spec into the provided struct. +// The target should be a pointer to the expected custom spec type. +// Returns an error if the custom spec is nil, empty, or unmarshaling fails. 
+// +// Example: +// +// engine := c.ds.Spec.Components["engine"] +// var customSpec psmdbspec.MongodCustomSpec +// if err := c.DecodeComponentCustomSpec(engine, &customSpec); err != nil { +// // handle error or use defaults +// } +func (c *Context) DecodeComponentCustomSpec(component v2alpha1.ComponentSpec, target interface{}) error { + if component.CustomSpec == nil || component.CustomSpec.Raw == nil { + return fmt.Errorf("component custom spec not set") + } + return json.Unmarshal(component.CustomSpec.Raw, target) +} + +// TryDecodeTopologyConfig attempts to decode topology config, returning false if not set. +// This is a convenience method that doesn't return an error for missing configs. +// +// Example: +// +// var config psmdbspec.ShardedTopologyConfig +// if c.TryDecodeTopologyConfig(&config) { +// numShards = config.NumShards +// } else { +// numShards = 2 // default +// } +func (c *Context) TryDecodeTopologyConfig(target interface{}) bool { + err := c.DecodeTopologyConfig(target) + return err == nil +} + +// TryDecodeGlobalConfig attempts to decode global config, returning false if not set. +func (c *Context) TryDecodeGlobalConfig(target interface{}) bool { + err := c.DecodeGlobalConfig(target) + return err == nil +} + +// TryDecodeComponentCustomSpec attempts to decode component custom spec, returning false if not set. +func (c *Context) TryDecodeComponentCustomSpec(component v2alpha1.ComponentSpec, target interface{}) bool { + err := c.DecodeComponentCustomSpec(component, target) + return err == nil +} + +// ============================================================================= +// STATUS TYPES +// ============================================================================= + +// Status represents the current state of the database cluster. 
+type Status struct {
+	Phase         v2alpha1.DataStorePhase
+	Message       string // human-readable detail for the current phase
+	ConnectionURL string
+	Credentials   string // Secret name containing credentials
+	Components    []ComponentStatus
+}
+
+// ComponentStatus represents the status of a single component.
+type ComponentStatus struct {
+	Name  string
+	Ready int32  // replicas currently ready
+	Total int32  // desired replicas
+	State string // "Ready", "InProgress", "Error"
+}
+
+// ToV2Alpha1 converts Status to the API type.
+// NOTE(review): only Phase, ConnectionURL and (when non-empty) the
+// credentials secret name are copied — Message and Components are silently
+// dropped here. Confirm whether v2alpha1.DataStoreStatus has fields for them
+// and wire them through if so.
+func (s Status) ToV2Alpha1() v2alpha1.DataStoreStatus {
+	status := v2alpha1.DataStoreStatus{
+		Phase:         s.Phase,
+		ConnectionURL: s.ConnectionURL,
+	}
+	if s.Credentials != "" {
+		status.CredentialSecretRef.Name = s.Credentials
+	}
+	return status
+}
+
+// Status helper functions
+
+// Creating returns a status indicating the cluster is being created.
+func Creating(message string) Status {
+	return Status{Phase: v2alpha1.DataStorePhaseCreating, Message: message}
+}
+
+// Running returns a status indicating the cluster is running.
+func Running() Status {
+	return Status{Phase: v2alpha1.DataStorePhaseRunning}
+}
+
+// RunningWithConnection returns a running status with connection details.
+func RunningWithConnection(url, credentialsSecret string) Status {
+	return Status{
+		Phase:         v2alpha1.DataStorePhaseRunning,
+		ConnectionURL: url,
+		Credentials:   credentialsSecret,
+	}
+}
+
+// Failed returns a status indicating the cluster has failed.
+func Failed(message string) Status {
+	return Status{Phase: v2alpha1.DataStorePhaseFailed, Message: message}
+}
+
+// =============================================================================
+// WAIT HELPERS
+// =============================================================================
+
+// WaitError signals that a step is waiting for something.
+// It is produced by WaitFor/WaitForDuration; presumably the SDK reconciler
+// translates it into a requeue after Duration rather than a failure — the
+// consuming reconciler is outside this file, so confirm there.
+type WaitError struct {
+	Reason   string
+	Duration time.Duration
+}
+
+// Error implements the error interface.
+func (e *WaitError) Error() string {
+	return fmt.Sprintf("waiting: %s", e.Reason)
+}
+
+// IsWaitError checks if an error is a WaitError. 
+func IsWaitError(err error) bool { + _, ok := err.(*WaitError) + return ok +} + +// GetWaitDuration returns the wait duration from a WaitError. +func GetWaitDuration(err error) time.Duration { + if we, ok := err.(*WaitError); ok { + return we.Duration + } + return 10 * time.Second +} + +// WaitFor returns an error indicating the step should be retried. +func WaitFor(reason string) error { + return &WaitError{Reason: reason, Duration: 10 * time.Second} +} + +// WaitForDuration returns an error indicating retry after a specific duration. +func WaitForDuration(reason string, d time.Duration) error { + return &WaitError{Reason: reason, Duration: d} +} diff --git a/pkg/controller/generate.go b/pkg/controller/generate.go new file mode 100644 index 0000000..0196af6 --- /dev/null +++ b/pkg/controller/generate.go @@ -0,0 +1,93 @@ +package controller + +// Provider Manifest Generator +// +// This file provides utilities for generating Provider CR YAML manifests +// from Go code at build time. +// +// Provider developers define their metadata in Go and generate a YAML manifest +// at build time. The manifest is then included in the Helm chart. +// +// Workflow: +// 1. Define metadata in Go (e.g., in psmdb_metadata.go) +// 2. Create a generator tool (e.g., cmd/generate/main.go) +// 3. Run `go generate` or `make generate` to create provider.yaml +// 4. Include provider.yaml in Helm chart +// +// Advantages: +// - YAML is visible and reviewable in version control +// - No runtime dependencies - manifest exists before deployment +// - Easy to inspect and debug +// - Works with GitOps workflows (manifest is static) +// +// See docs/PROVIDER_CR_GENERATION.md for complete documentation. + +import ( + "fmt" + "os" + "path/filepath" +) + +// GenerateManifest generates a Provider CR YAML manifest and writes it to a file. +// This is a convenience function for use in go:generate directives or build scripts. 
+// +// Example usage in a generator tool: +// +// func main() { +// metadata := defineMetadata() +// if err := sdk.GenerateManifest(metadata, "my-provider", "", "charts/provider.yaml"); err != nil { +// log.Fatal(err) +// } +// } +func GenerateManifest(metadata *ProviderMetadata, name, namespace, outputPath string) error { + // Validate metadata first + if err := metadata.Validate(); err != nil { + return fmt.Errorf("invalid metadata: %w", err) + } + + // Generate YAML + yaml, err := metadata.ToYAML(name, namespace) + if err != nil { + return fmt.Errorf("failed to generate YAML: %w", err) + } + + // Ensure output directory exists + dir := filepath.Dir(outputPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // Write to file + if err := os.WriteFile(outputPath, []byte(yaml), 0644); err != nil { + return fmt.Errorf("failed to write manifest: %w", err) + } + + return nil +} + +// GenerateManifestToStdout generates a Provider CR YAML manifest and writes it to stdout. +// Useful for piping to other tools or quick inspection. +func GenerateManifestToStdout(metadata *ProviderMetadata, name, namespace string) error { + // Validate metadata first + if err := metadata.Validate(); err != nil { + return fmt.Errorf("invalid metadata: %w", err) + } + + // Generate YAML + yaml, err := metadata.ToYAML(name, namespace) + if err != nil { + return fmt.Errorf("failed to generate YAML: %w", err) + } + + fmt.Print(yaml) + return nil +} + +// MustGenerateManifest is like GenerateManifest but panics on error. +// Useful for go:generate directives where error handling is awkward. 
+func MustGenerateManifest(metadata *ProviderMetadata, name, namespace, outputPath string) { + if err := GenerateManifest(metadata, name, namespace, outputPath); err != nil { + panic(err) + } +} + diff --git a/pkg/controller/interface.go b/pkg/controller/interface.go new file mode 100644 index 0000000..fafb60a --- /dev/null +++ b/pkg/controller/interface.go @@ -0,0 +1,165 @@ +package controller + +// Provider SDK +// +// Implement the Provider interface to create a provider. +// Embed BaseProvider for default implementations. +// +// See examples/psmdb for a complete example. + +import ( + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ProviderInterface defines the interface for a database provider. +type ProviderInterface interface { + // Name returns the unique identifier for this provider (e.g., "psmdb", "postgresql"). + Name() string + + // Types returns the scheme builder for registering provider-specific CRDs. + Types() func(*runtime.Scheme) error + + // OwnedTypes returns types this provider creates (triggers reconciliation on changes). + OwnedTypes() []client.Object + + // Validate checks if the DataStore spec is valid. + Validate(c *Context) error + + // Sync ensures all required resources exist and are configured. + Sync(c *Context) error + + // Status computes the current status of the database. + Status(c *Context) (Status, error) + + // Cleanup handles deletion (called when deletion timestamp is set). + Cleanup(c *Context) error +} + +// MetadataProvider is an optional interface for exposing provider metadata. +// Providers that embed BaseProvider with Metadata set automatically satisfy this. +type MetadataProvider interface { + GetMetadata() *ProviderMetadata +} + +// BaseProvider provides default implementations for common Provider methods. +// Embed this in your provider struct to inherit defaults. 
+type BaseProvider struct { + ProviderName string + SchemeFuncs []func(*runtime.Scheme) error + Owned []client.Object + Metadata *ProviderMetadata +} + +func (b *BaseProvider) Name() string { + return b.ProviderName +} + +func (b *BaseProvider) Types() func(*runtime.Scheme) error { + if len(b.SchemeFuncs) == 0 { + return nil + } + return func(s *runtime.Scheme) error { + for _, fn := range b.SchemeFuncs { + if err := fn(s); err != nil { + return err + } + } + return nil + } +} + +func (b *BaseProvider) OwnedTypes() []client.Object { + return b.Owned +} + +// GetMetadata returns the provider metadata. +// Returns nil if no metadata is configured. +func (b *BaseProvider) GetMetadata() *ProviderMetadata { + return b.Metadata +} + +// ============================================================================= +// SCHEMA PROVIDER (Optional interface for HTTP server) +// ============================================================================= + +// TopologyComponentDefinition defines a component within a topology with its metadata. +type TopologyComponentDefinition struct { + // Optional indicates if this component is optional in the topology. + // If false, the component is required. + Optional bool + + // Defaults provides default values for this component in this topology. + // Example: map[string]interface{}{"replicas": 3} + Defaults map[string]interface{} +} + +// TopologyDefinition combines a topology's configuration schema with its supported components. +type TopologyDefinition struct { + // Schema is the Go type that defines the topology-specific configuration. + // This will be converted to an OpenAPI schema. + Schema interface{} + + // Components maps component names to their definitions within this topology. 
+ // Example: map[string]TopologyComponentDefinition{ + // "engine": {Optional: false, Defaults: map[string]interface{}{"replicas": 3}}, + // "backupAgent": {Optional: true}, + // "monitoring": {Optional: true}, + // } + Components map[string]TopologyComponentDefinition +} + +// SchemaProvider is an optional interface that providers can implement +// to expose OpenAPI schemas for their components, topologies, and global config. +// This enables the HTTP server to serve schema information for documentation +// and client-side validation. +// +// Example implementation: +// +// func (p *PSMDBProvider) ComponentSchemas() map[string]interface{} { +// return map[string]interface{}{ +// "engine": &MongodCustomSpec{}, +// "configServer": &MongodCustomSpec{}, +// "proxy": &MongosCustomSpec{}, +// } +// } +// +// func (p *PSMDBProvider) Topologies() map[string]TopologyDefinition { +// return map[string]TopologyDefinition{ +// "replicaset": { +// Schema: &ReplicaSetTopologyConfig{}, +// Components: map[string]TopologyComponentDefinition{ +// "engine": {Optional: false, Defaults: map[string]interface{}{"replicas": 3}}, +// "backupAgent": {Optional: true}, +// "monitoring": {Optional: true}, +// }, +// }, +// "sharded": { +// Schema: &ShardedTopologyConfig{}, +// Components: map[string]TopologyComponentDefinition{ +// "engine": {Optional: false}, +// "proxy": {Optional: false}, +// "configServer": {Optional: false}, +// "backupAgent": {Optional: true}, +// "monitoring": {Optional: true}, +// }, +// }, +// } +// } +// +// func (p *PSMDBProvider) GlobalSchema() interface{} { +// return &GlobalConfig{} +// } +type SchemaProvider interface { + // ComponentSchemas returns a map of component names to their custom spec types. + // The Go types are converted to OpenAPI v3 JSON schemas. + ComponentSchemas() map[string]interface{} + + // Topologies returns a map of topology names to their definitions. 
+ // Each definition includes both the configuration schema and the list of supported components. + Topologies() map[string]TopologyDefinition + + // GlobalSchema returns the type for global configuration. + // Returns nil if no global schema is needed. + GlobalSchema() interface{} +} diff --git a/pkg/controller/metadata.go b/pkg/controller/metadata.go new file mode 100644 index 0000000..db67394 --- /dev/null +++ b/pkg/controller/metadata.go @@ -0,0 +1,377 @@ +package controller + +// Provider Metadata Types +// +// This file defines the types for declaring provider metadata in Go code. +// Provider developers use these types to define their component types, versions, +// and topologies programmatically. +// +// The metadata is generated into a YAML manifest via CLI tooling during the +// build process, then included in the Helm chart. +// +// See docs/PROVIDER_CR_GENERATION.md for the complete workflow. + +import ( + "bytes" + "fmt" + + "github.com/openeverest/provider-sdk/pkg/apis/v2alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +// ============================================================================= +// PROVIDER METADATA TYPES +// ============================================================================= + +// ProviderMetadata defines the metadata for a provider. +// This includes component types, components, and topologies that the provider supports. +// Provider developers define this in Go code, and the SDK handles converting it +// to a Provider CR via CLI generation. +type ProviderMetadata struct { + // ComponentTypes defines the available component types with their versions. + // Example: "mongod" component type with versions "6.0.19-16", "7.0.18-11", etc. + ComponentTypes map[string]ComponentTypeMeta `json:"componentTypes,omitempty"` + + // Components defines the logical components that use the component types. + // Example: "engine", "configServer", "proxy" all use the "mongod" type. 
+ Components map[string]ComponentMeta `json:"components,omitempty"` + + // Topologies defines the supported deployment topologies. + // Example: "standard" (single replica set), "sharded" (sharded cluster). + Topologies map[string]TopologyMeta `json:"topologies,omitempty"` +} + +// ComponentTypeMeta defines a component type with its available versions. +type ComponentTypeMeta struct { + // Versions lists all available versions for this component type. + Versions []ComponentVersionMeta `json:"versions,omitempty"` +} + +// ComponentVersionMeta defines a specific version of a component type. +type ComponentVersionMeta struct { + // Version is the semantic version string (e.g., "8.0.8-3"). + Version string `json:"version,omitempty"` + + // Image is the container image for this version. + Image string `json:"image,omitempty"` + + // Default indicates if this is the default version for the component type. + Default bool `json:"default,omitempty"` +} + +// ComponentMeta defines a logical component that uses a component type. +type ComponentMeta struct { + // Type references a component type defined in ComponentTypes. + Type string `json:"type,omitempty"` +} + +// TopologyMeta defines a deployment topology. +type TopologyMeta struct { + // Components defines which components are part of this topology. + Components map[string]TopologyComponentMeta `json:"components,omitempty"` +} + +// TopologyComponentMeta defines a component within a topology. +type TopologyComponentMeta struct { + // Optional indicates if this component is optional in the topology. + Optional bool `json:"optional,omitempty"` + + // Defaults provides default values for this component in this topology. 
+	Defaults map[string]interface{} `json:"defaults,omitempty"`
+}
+
+// =============================================================================
+// CONVERSION TO PROVIDER CR
+// =============================================================================
+
+// ToProviderCR converts ProviderMetadata to a Provider custom resource.
+// This is used by the CLI tool to generate YAML manifests.
+// The returned object carries TypeMeta so it can be marshaled directly as a
+// standalone manifest; namespace may be left empty by the caller.
+func (m *ProviderMetadata) ToProviderCR(name, namespace string) *v2alpha1.Provider {
+	provider := &v2alpha1.Provider{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "everest.percona.com/v2alpha1",
+			Kind:       "Provider",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: v2alpha1.ProviderSpec{
+			ComponentTypes: make(map[string]v2alpha1.ComponentType),
+			Components:     make(map[string]v2alpha1.Component),
+			Topologies:     make(map[string]v2alpha1.Topology),
+		},
+	}
+
+	// Convert component types
+	for typeName, typeMeta := range m.ComponentTypes {
+		versions := make([]v2alpha1.ComponentVersion, 0, len(typeMeta.Versions))
+		for _, v := range typeMeta.Versions {
+			versions = append(versions, v2alpha1.ComponentVersion{
+				Version: v.Version,
+				Image:   v.Image,
+				Default: v.Default,
+			})
+		}
+		provider.Spec.ComponentTypes[typeName] = v2alpha1.ComponentType{
+			Versions: versions,
+		}
+	}
+
+	// Convert components
+	for compName, compMeta := range m.Components {
+		provider.Spec.Components[compName] = v2alpha1.Component{
+			Type: compMeta.Type,
+		}
+	}
+
+	// Convert topologies
+	for topoName, topoMeta := range m.Topologies {
+		components := make(map[string]v2alpha1.TopologyComponent)
+		for compName, compMeta := range topoMeta.Components {
+			components[compName] = v2alpha1.TopologyComponent{
+				Optional: compMeta.Optional,
+				// TODO(review): per-topology Defaults are silently dropped by
+				// this conversion — presumably v2alpha1.TopologyComponent has
+				// no Defaults field yet. Confirm, and either add the field or
+				// document that defaults never reach the generated CR.
+				//Defaults: compMeta.Defaults,
+			}
+		}
+		provider.Spec.Topologies[topoName] = v2alpha1.Topology{
+			Components: components,
+		}
+	}
+
+	return provider
+}
+
+// ToYAML converts ProviderMetadata to a YAML manifest string. 
+// This is used by the CLI tool to generate the Provider CR for Helm packaging. +func (m *ProviderMetadata) ToYAML(name, namespace string) (string, error) { + provider := m.ToProviderCR(name, namespace) + + data, err := yaml.Marshal(provider) + if err != nil { + return "", fmt.Errorf("failed to marshal provider to YAML: %w", err) + } + + // Add a header comment + var buf bytes.Buffer + buf.WriteString("# Provider CR generated from Go code\n") + buf.WriteString("# Do not edit manually - regenerate using: provider-sdk generate-manifest\n") + buf.WriteString("---\n") + buf.Write(data) + + return buf.String(), nil +} + +// ============================================================================= +// VALIDATION HELPERS +// ============================================================================= + +// Validate checks that the ProviderMetadata is internally consistent. +// It verifies that: +// - All component types referenced by components exist +// - All components referenced by topologies exist +func (m *ProviderMetadata) Validate() error { + // Check that component types referenced by components exist + for compName, comp := range m.Components { + if _, ok := m.ComponentTypes[comp.Type]; !ok { + return fmt.Errorf("component %q references unknown component type %q", compName, comp.Type) + } + } + + // Check that components referenced by topologies exist + for topoName, topo := range m.Topologies { + for compName := range topo.Components { + if _, ok := m.Components[compName]; !ok { + return fmt.Errorf("topology %q references unknown component %q", topoName, compName) + } + } + } + + return nil +} + +// ============================================================================= +// BUILDER HELPERS (Fluent API for defining metadata) +// ============================================================================= + +// NewProviderMetadata creates a new empty ProviderMetadata. 
+func NewProviderMetadata() *ProviderMetadata { + return &ProviderMetadata{ + ComponentTypes: make(map[string]ComponentTypeMeta), + Components: make(map[string]ComponentMeta), + Topologies: make(map[string]TopologyMeta), + } +} + +// AddComponentType adds a component type with its versions. +func (m *ProviderMetadata) AddComponentType(name string, versions ...ComponentVersionMeta) *ProviderMetadata { + m.ComponentTypes[name] = ComponentTypeMeta{Versions: versions} + return m +} + +// AddComponent adds a component that uses a component type. +func (m *ProviderMetadata) AddComponent(name, typeName string) *ProviderMetadata { + m.Components[name] = ComponentMeta{Type: typeName} + return m +} + +// AddTopology adds a topology with its component configuration. +func (m *ProviderMetadata) AddTopology(name string, components map[string]TopologyComponentMeta) *ProviderMetadata { + m.Topologies[name] = TopologyMeta{Components: components} + return m +} + +// Version creates a ComponentVersionMeta (helper for fluent API). +func Version(version, image string) ComponentVersionMeta { + return ComponentVersionMeta{Version: version, Image: image} +} + +// DefaultVersion creates a default ComponentVersionMeta (helper for fluent API). +func DefaultVersion(version, image string) ComponentVersionMeta { + return ComponentVersionMeta{Version: version, Image: image, Default: true} +} + +// TopologyComponent creates a TopologyComponentMeta (helper for fluent API). +func TopologyComponent(optional bool, defaults map[string]interface{}) TopologyComponentMeta { + return TopologyComponentMeta{Optional: optional, Defaults: defaults} +} + +// RequiredComponent creates a required TopologyComponentMeta with defaults. +func RequiredComponent(defaults map[string]interface{}) TopologyComponentMeta { + return TopologyComponentMeta{Optional: false, Defaults: defaults} +} + +// OptionalComponent creates an optional TopologyComponentMeta. 
+func OptionalComponent() TopologyComponentMeta { + return TopologyComponentMeta{Optional: true} +} + +// ============================================================================= +// METADATA QUERY HELPERS +// ============================================================================= +// +// These helpers make it easy to look up version and image information from +// provider metadata during reconciliation. +// +// EXAMPLE USAGE: +// +// func SyncDatabase(c *Context) error { +// metadata := MyProviderMetadata() +// +// // Get the engine component from the datastore spec +// engine := c.DB().Spec.Components["engine"] +// +// // Look up the default image for the component's type +// image := metadata.GetDefaultImage(engine.Type) +// +// // Or use the convenience method to go directly from component name +// image = metadata.GetDefaultImageForComponent("engine") +// +// // Apply it to your custom resource +// myDB.Spec.Image = image +// return c.Apply(myDB) +// } +// +// TYPICAL PATTERN: +// +// // User specifies a component with a type (e.g., "mongod") +// component := c.DB().Spec.Components["engine"] +// +// // Determine which image to use +// var image string +// if component.Image != "" { +// // User explicitly specified an image override +// image = component.Image +// } else { +// // Use the default image for this component type +// image = metadata.GetDefaultImage(component.Type) +// } +// + +// GetDefaultVersion returns the default ComponentVersionMeta for a given component type. +// Returns nil if the component type doesn't exist or has no default version. +func (m *ProviderMetadata) GetDefaultVersion(componentType string) *ComponentVersionMeta { + typeMeta, ok := m.ComponentTypes[componentType] + if !ok { + return nil + } + + for _, v := range typeMeta.Versions { + if v.Default { + return &v + } + } + + return nil +} + +// GetDefaultImage returns the default image for a given component type. 
+// Returns empty string if the component type doesn't exist or has no default version. +func (m *ProviderMetadata) GetDefaultImage(componentType string) string { + version := m.GetDefaultVersion(componentType) + if version == nil { + return "" + } + return version.Image +} + +// GetComponentType returns the component type name for a given component name. +// For example, if "engine" component uses "mongod" type, GetComponentType("engine") returns "mongod". +// Returns empty string if the component doesn't exist. +func (m *ProviderMetadata) GetComponentType(componentName string) string { + comp, ok := m.Components[componentName] + if !ok { + return "" + } + return comp.Type +} + +// GetDefaultImageForComponent returns the default image for a given component name. +// This is a convenience method that combines GetComponentType and GetDefaultImage. +// For example, if "engine" uses "mongod" type, and "mongod" has a default version, +// this returns the image for that default version. +// Returns empty string if the component doesn't exist or has no default version. +func (m *ProviderMetadata) GetDefaultImageForComponent(componentName string) string { + componentType := m.GetComponentType(componentName) + if componentType == "" { + return "" + } + return m.GetDefaultImage(componentType) +} + +// ============================================================================= +// CONVERSION FROM SCHEMA PROVIDER +// ============================================================================= + +// TopologiesFromSchemaProvider converts SchemaProvider topology definitions to metadata topologies. +// This allows you to define topologies once in your SchemaProvider implementation +// and derive the metadata structure from it. 
+// +// Example: +// +// func (p *PSMDBProvider) GetMetadata() *ProviderMetadata { +// metadata := &ProviderMetadata{ +// ComponentTypes: ..., +// Components: ..., +// } +// // Derive topologies from SchemaProvider +// metadata.Topologies = TopologiesFromSchemaProvider(p.Topologies()) +// return metadata +// } +func TopologiesFromSchemaProvider(topologies map[string]TopologyDefinition) map[string]TopologyMeta { + result := make(map[string]TopologyMeta) + for topoName, topoDef := range topologies { + components := make(map[string]TopologyComponentMeta) + for compName, compDef := range topoDef.Components { + components[compName] = TopologyComponentMeta{ + Optional: compDef.Optional, + Defaults: compDef.Defaults, + } + } + result[topoName] = TopologyMeta{ + Components: components, + } + } + return result +} diff --git a/pkg/openapi/doc.go b/pkg/openapi/doc.go new file mode 100644 index 0000000..e7a927a --- /dev/null +++ b/pkg/openapi/doc.go @@ -0,0 +1,34 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package openapi contains OpenAPI schema definitions and utilities for the provider SDK. +// It provides pre-generated OpenAPI schemas for provider custom spec types using kube-openapi. 
+// +// This package uses the Kubernetes kube-openapi tooling for schema generation: +// - Types are annotated with kubebuilder markers for validation +// - Schemas are generated at build time using openapi-gen +// - The SchemaRegistry serves pre-generated schemas at runtime +// +// Usage: +// +// import "github.com/openeverest/provider-sdk/pkg/openapi" +// +// // Get pre-generated definitions +// defs := openapi.GetOpenAPIDefinitions(ref) +// +// // Use SchemaRegistry with pre-generated schemas +// registry := openapi.NewSchemaRegistry(defs) +package openapi diff --git a/pkg/openapi/registry.go b/pkg/openapi/registry.go new file mode 100644 index 0000000..020d6ca --- /dev/null +++ b/pkg/openapi/registry.go @@ -0,0 +1,336 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "encoding/json" + "sync" + + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// SchemaRegistry holds registered schemas for components, topologies, and global config. +// It uses kube-openapi for pre-generated OpenAPI schema serving. +// +// There are two ways to use SchemaRegistry: +// +// 1. With pre-generated definitions (recommended for production): +// +// defs := generated.GetOpenAPIDefinitions(openapi.DefaultReferenceCallback) +// registry := openapi.NewSchemaRegistryFromDefinitions(defs) +// registry.MapComponent("mongod", "github.com/myorg/provider/types.MongodCustomSpec") +// +// 2. 
With direct schema registration (for testing or dynamic schemas): +// +// registry := openapi.NewSchemaRegistry() +// registry.RegisterComponentSchema("mongod", mySchema) +type SchemaRegistry struct { + mu sync.RWMutex + + // definitions holds all pre-generated OpenAPI definitions keyed by canonical type name + definitions map[string]common.OpenAPIDefinition + + // Components maps component name to its schema (e.g., "mongod", "mongos") + Components map[string]*spec.Schema + + // Topologies maps topology name to its schema (e.g., "replicaset", "sharded") + Topologies map[string]*spec.Schema + + // Global is the schema for global/cluster-wide configuration + Global *spec.Schema + + // typeMapping maps short names to canonical type names + componentTypeMap map[string]string + topologyTypeMap map[string]string + globalTypeName string +} + +// NewSchemaRegistry creates a new empty SchemaRegistry. +func NewSchemaRegistry() *SchemaRegistry { + return &SchemaRegistry{ + definitions: make(map[string]common.OpenAPIDefinition), + Components: make(map[string]*spec.Schema), + Topologies: make(map[string]*spec.Schema), + componentTypeMap: make(map[string]string), + topologyTypeMap: make(map[string]string), + } +} + +// NewSchemaRegistryFromDefinitions creates a SchemaRegistry pre-populated with +// OpenAPI definitions from a GetOpenAPIDefinitions function. +// +// Example: +// +// import "github.com/myorg/provider/pkg/generated/openapi" +// +// defs := openapi.GetOpenAPIDefinitions(openapi.DefaultReferenceCallback) +// registry := NewSchemaRegistryFromDefinitions(defs) +func NewSchemaRegistryFromDefinitions(defs map[string]common.OpenAPIDefinition) *SchemaRegistry { + r := NewSchemaRegistry() + r.definitions = defs + return r +} + +// NewSchemaRegistryWithFunc creates a SchemaRegistry using a GetOpenAPIDefinitions function. +// This is a convenience wrapper that calls the function with DefaultReferenceCallback. 
+// +// Example: +// +// import "github.com/myorg/provider/pkg/generated/openapi" +// +// registry := NewSchemaRegistryWithFunc(openapi.GetOpenAPIDefinitions) +func NewSchemaRegistryWithFunc(fn GetOpenAPIDefinitionsFunc) *SchemaRegistry { + defs := fn(DefaultReferenceCallback) + return NewSchemaRegistryFromDefinitions(defs) +} + +// ============================================================================= +// TYPE MAPPING (for pre-generated schemas) +// ============================================================================= + +// MapComponent maps a component name to a pre-generated schema by type name. +// The typeName should be the canonical Go type name used in GetOpenAPIDefinitions. +// +// Example: +// +// registry.MapComponent("mongod", "github.com/myorg/provider/types.MongodCustomSpec") +func (r *SchemaRegistry) MapComponent(componentName, typeName string) error { + r.mu.Lock() + defer r.mu.Unlock() + + def, ok := r.definitions[typeName] + if !ok { + return &SchemaNotFoundError{TypeName: typeName} + } + + schema := def.Schema + r.Components[componentName] = &schema + r.componentTypeMap[componentName] = typeName + return nil +} + +// MapTopology maps a topology name to a pre-generated schema by type name. +func (r *SchemaRegistry) MapTopology(topologyName, typeName string) error { + r.mu.Lock() + defer r.mu.Unlock() + + def, ok := r.definitions[typeName] + if !ok { + return &SchemaNotFoundError{TypeName: typeName} + } + + schema := def.Schema + r.Topologies[topologyName] = &schema + r.topologyTypeMap[topologyName] = typeName + return nil +} + +// MapGlobal maps the global schema to a pre-generated schema by type name. 
+func (r *SchemaRegistry) MapGlobal(typeName string) error { + r.mu.Lock() + defer r.mu.Unlock() + + def, ok := r.definitions[typeName] + if !ok { + return &SchemaNotFoundError{TypeName: typeName} + } + + schema := def.Schema + r.Global = &schema + r.globalTypeName = typeName + return nil +} + +// MustMapComponent is like MapComponent but panics on error. +func (r *SchemaRegistry) MustMapComponent(componentName, typeName string) { + if err := r.MapComponent(componentName, typeName); err != nil { + panic(err) + } +} + +// MustMapTopology is like MapTopology but panics on error. +func (r *SchemaRegistry) MustMapTopology(topologyName, typeName string) { + if err := r.MapTopology(topologyName, typeName); err != nil { + panic(err) + } +} + +// MustMapGlobal is like MapGlobal but panics on error. +func (r *SchemaRegistry) MustMapGlobal(typeName string) { + if err := r.MapGlobal(typeName); err != nil { + panic(err) + } +} + +// ============================================================================= +// DIRECT SCHEMA REGISTRATION (for testing or dynamic schemas) +// ============================================================================= + +// RegisterComponentSchema directly registers a schema for a component. +// Use this for testing or when you have dynamically created schemas. +func (r *SchemaRegistry) RegisterComponentSchema(name string, schema *spec.Schema) { + r.mu.Lock() + defer r.mu.Unlock() + r.Components[name] = schema +} + +// RegisterTopologySchema directly registers a schema for a topology. +func (r *SchemaRegistry) RegisterTopologySchema(name string, schema *spec.Schema) { + r.mu.Lock() + defer r.mu.Unlock() + r.Topologies[name] = schema +} + +// RegisterGlobalSchema directly registers the global schema. 
+func (r *SchemaRegistry) RegisterGlobalSchema(schema *spec.Schema) { + r.mu.Lock() + defer r.mu.Unlock() + r.Global = schema +} + +// ============================================================================= +// SCHEMA RETRIEVAL +// ============================================================================= + +// GetComponentSchema returns the schema for a component by name. +func (r *SchemaRegistry) GetComponentSchema(name string) (*spec.Schema, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + schema, ok := r.Components[name] + return schema, ok +} + +// GetTopologySchema returns the schema for a topology by name. +func (r *SchemaRegistry) GetTopologySchema(name string) (*spec.Schema, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + schema, ok := r.Topologies[name] + return schema, ok +} + +// GetGlobalSchema returns the global configuration schema. +func (r *SchemaRegistry) GetGlobalSchema() (*spec.Schema, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + return r.Global, r.Global != nil +} + +// GetDefinition returns the full OpenAPI definition for a type name. +func (r *SchemaRegistry) GetDefinition(typeName string) (common.OpenAPIDefinition, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + def, ok := r.definitions[typeName] + return def, ok +} + +// ============================================================================= +// LISTING +// ============================================================================= + +// ListComponents returns all registered component names. +func (r *SchemaRegistry) ListComponents() []string { + r.mu.RLock() + defer r.mu.RUnlock() + names := make([]string, 0, len(r.Components)) + for name := range r.Components { + names = append(names, name) + } + return names +} + +// ListTopologies returns all registered topology names. 
+func (r *SchemaRegistry) ListTopologies() []string { + r.mu.RLock() + defer r.mu.RUnlock() + names := make([]string, 0, len(r.Topologies)) + for name := range r.Topologies { + names = append(names, name) + } + return names +} + +// ListDefinitions returns all available pre-generated type names. +func (r *SchemaRegistry) ListDefinitions() []string { + r.mu.RLock() + defer r.mu.RUnlock() + names := make([]string, 0, len(r.definitions)) + for name := range r.definitions { + names = append(names, name) + } + return names +} + +// ============================================================================= +// DOCUMENT GENERATION +// ============================================================================= + +// AllSchemas returns a complete OpenAPI 3.0 document containing all registered schemas. +// This is useful for documentation and can be served at an endpoint. +func (r *SchemaRegistry) AllSchemas() *OpenAPIDocument { + r.mu.RLock() + defer r.mu.RUnlock() + + schemas := make(map[string]*spec.Schema) + + // Add component schemas + for name, schema := range r.Components { + schemas["component."+name] = schema + } + + // Add topology schemas + for name, schema := range r.Topologies { + schemas["topology."+name] = schema + } + + // Add global schema + if r.Global != nil { + schemas["global"] = r.Global + } + + return &OpenAPIDocument{ + OpenAPI: "3.0.3", + Info: OpenAPIInfo{ + Title: "Provider Configuration Schemas", + Version: "1.0.0", + Description: "OpenAPI schemas for provider component, topology, and global configurations", + }, + Components: &OpenAPIComponents{ + Schemas: schemas, + }, + Paths: make(map[string]interface{}), + } +} + +// AllSchemasJSON returns the complete OpenAPI document as JSON bytes. 
+func (r *SchemaRegistry) AllSchemasJSON() ([]byte, error) { + doc := r.AllSchemas() + return json.MarshalIndent(doc, "", " ") +} + +// ============================================================================= +// ERROR TYPES +// ============================================================================= + +// SchemaNotFoundError is returned when a schema type is not found in definitions. +type SchemaNotFoundError struct { + TypeName string +} + +func (e *SchemaNotFoundError) Error() string { + return "schema not found for type: " + e.TypeName +} diff --git a/pkg/openapi/types.go b/pkg/openapi/types.go new file mode 100644 index 0000000..4008026 --- /dev/null +++ b/pkg/openapi/types.go @@ -0,0 +1,140 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +package openapi + +import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// ============================================================================= +// SCHEMA DEFINITION TYPES +// ============================================================================= + +// OpenAPISchemaDefinition holds a pre-generated OpenAPI schema and its metadata. 
+type OpenAPISchemaDefinition struct { + // Schema is the OpenAPI v3 schema + Schema *spec.Schema + // TypeName is the Go type name (e.g., "MongodCustomSpec") + TypeName string + // PackagePath is the full Go package path + PackagePath string +} + +// GetOpenAPIDefinitionsFunc is the signature for generated GetOpenAPIDefinitions functions. +type GetOpenAPIDefinitionsFunc func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition + +// ============================================================================= +// SCHEMA UTILITIES +// ============================================================================= + +// SchemaToJSON converts a spec.Schema to JSON bytes. +func SchemaToJSON(schema *spec.Schema) ([]byte, error) { + return json.Marshal(schema) +} + +// DefinitionsToSchemaMap extracts schemas from OpenAPI definitions into a simple map. +// This is useful for serving schemas without the full OpenAPI document structure. +func DefinitionsToSchemaMap(defs map[string]common.OpenAPIDefinition) map[string]*spec.Schema { + result := make(map[string]*spec.Schema, len(defs)) + for name, def := range defs { + schema := def.Schema + result[name] = &schema + } + return result +} + +// SchemaForType extracts a single schema from definitions by type name. +// The typeName should be the canonical Go type name (e.g., "github.com/pkg/types.MyType"). +func SchemaForType(defs map[string]common.OpenAPIDefinition, typeName string) (*spec.Schema, bool) { + def, ok := defs[typeName] + if !ok { + return nil, false + } + schema := def.Schema + return &schema, true +} + +// ============================================================================= +// REFERENCE CALLBACK HELPERS +// ============================================================================= + +// DefaultReferenceCallback creates a reference callback that generates standard OpenAPI refs. +// References are formatted as "#/components/schemas/{name}". 
+func DefaultReferenceCallback(path string) spec.Ref { + return spec.MustCreateRef("#/components/schemas/" + path) +} + +// DefinitionsReferenceCallback creates a reference callback for OpenAPI 2.0 style refs. +// References are formatted as "#/definitions/{name}". +func DefinitionsReferenceCallback(path string) spec.Ref { + return spec.MustCreateRef("#/definitions/" + path) +} + +// ============================================================================= +// OPENAPI DOCUMENT BUILDER +// ============================================================================= + +// OpenAPIDocument represents a complete OpenAPI 3.0 document. +type OpenAPIDocument struct { + OpenAPI string `json:"openapi"` + Info OpenAPIInfo `json:"info"` + Components *OpenAPIComponents `json:"components,omitempty"` + Paths map[string]interface{} `json:"paths,omitempty"` +} + +// OpenAPIInfo contains API metadata. +type OpenAPIInfo struct { + Title string `json:"title"` + Version string `json:"version"` + Description string `json:"description,omitempty"` +} + +// OpenAPIComponents contains reusable components. +type OpenAPIComponents struct { + Schemas map[string]*spec.Schema `json:"schemas,omitempty"` +} + +// BuildOpenAPIDocument creates an OpenAPI 3.0 document from definitions. +func BuildOpenAPIDocument(title, version, description string, defs map[string]common.OpenAPIDefinition) *OpenAPIDocument { + schemas := make(map[string]*spec.Schema, len(defs)) + for name, def := range defs { + schema := def.Schema + schemas[name] = &schema + } + + return &OpenAPIDocument{ + OpenAPI: "3.0.3", + Info: OpenAPIInfo{ + Title: title, + Version: version, + Description: description, + }, + Components: &OpenAPIComponents{ + Schemas: schemas, + }, + Paths: make(map[string]interface{}), + } +} + +// ToJSON serializes the document to JSON. 
+func (d *OpenAPIDocument) ToJSON() ([]byte, error) { + return json.MarshalIndent(d, "", " ") +} diff --git a/pkg/reconciler/provider.go b/pkg/reconciler/provider.go new file mode 100644 index 0000000..590c8a3 --- /dev/null +++ b/pkg/reconciler/provider.go @@ -0,0 +1,366 @@ +package reconciler + +import ( + "context" + "fmt" + + "github.com/openeverest/provider-sdk/pkg/apis/v2alpha1" + "github.com/openeverest/provider-sdk/pkg/controller" + "github.com/openeverest/provider-sdk/pkg/server" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const finalizerName = "everest.percona.com/provider-finalizer" + +// ============================================================================= +// PROVIDER RECONCILER - Works with both Interface and Builder providers +// ============================================================================= + +// ProviderReconciler reconciles DataStore resources using a Provider. +type ProviderReconciler struct { + provider providerAdapter + manager ctrl.Manager + serverConfig *server.ServerConfig + server *server.Server + client.Client +} + +// providerAdapter is the internal interface that both provider types satisfy. +type providerAdapter interface { + Name() string + Types() func(*runtime.Scheme) error + OwnedTypes() []client.Object + Validate(c *controller.Context) error + Sync(c *controller.Context) error + Status(c *controller.Context) (controller.Status, error) + Cleanup(c *controller.Context) error +} + +// SchemaProvider is re-exported from controller package for convenience. +// See controller.SchemaProvider for documentation. 
type SchemaProvider = controller.SchemaProvider

// ServerConfig is re-exported from server package for convenience.
// See server.ServerConfig for documentation.
type ServerConfig = server.ServerConfig

// New creates a reconciler from a provider.
// Options (such as WithServer) are applied in order before the underlying
// controller-runtime manager is constructed.
func New(p controller.ProviderInterface, opts ...ReconcilerOption) (*ProviderReconciler, error) {
	return newReconciler(p, opts...)
}

// ReconcilerOption configures the reconciler.
type ReconcilerOption func(*reconcilerOptions)

// reconcilerOptions collects the configuration accumulated by ReconcilerOption
// functions before the reconciler is built.
type reconcilerOptions struct {
	// serverConfig is non-nil only when WithServer was supplied; it triggers
	// setupServer during construction.
	serverConfig *server.ServerConfig
}

// WithServer enables the integrated HTTP server for schema exposure and validation webhook.
//
// The server provides:
//   - Schema endpoint: Returns OpenAPI schemas for components, topologies, and global config
//   - Validation webhook: Accepts validation requests and runs the provider's Validate() method
//   - Health/Ready endpoints: For Kubernetes probes
//
// Example:
//
//	r, err := reconciler.New(provider,
//		reconciler.WithServer(reconciler.ServerConfig{
//			Port:           8080,
//			SchemaPath:     "/schema",
//			ValidationPath: "/validate",
//		}),
//	)
//
// The provider must implement SchemaProvider interface to register component schemas.
// Validation is handled by the provider's Validate() method - the same validation
// used during reconciliation is exposed via the webhook.
func WithServer(config server.ServerConfig) ReconcilerOption {
	return func(o *reconcilerOptions) {
		// config is captured by value here, so the stored pointer refers to a
		// private copy rather than the caller's variable.
		o.serverConfig = &config
	}
}

// newReconciler creates a reconciler from any provider that satisfies providerAdapter.
+func newReconciler(p providerAdapter, opts ...ReconcilerOption) (*ProviderReconciler, error) { + // Apply options + options := &reconcilerOptions{} + for _, opt := range opts { + opt(options) + } + scheme := runtime.NewScheme() + + // Register core types + if err := v2alpha1.AddToScheme(scheme); err != nil { + return nil, fmt.Errorf("failed to add v2alpha1 scheme: %w", err) + } + + // Register provider-specific types + if typesFunc := p.Types(); typesFunc != nil { + if err := typesFunc(scheme); err != nil { + return nil, fmt.Errorf("failed to add provider scheme: %w", err) + } + } + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zap.Options{Development: true}))) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme}) + if err != nil { + return nil, fmt.Errorf("failed to create manager: %w", err) + } + + r := &ProviderReconciler{ + provider: p, + manager: mgr, + serverConfig: options.serverConfig, + Client: mgr.GetClient(), + } + + // Setup server if configured + if options.serverConfig != nil { + if err := r.setupServer(p); err != nil { + return nil, fmt.Errorf("failed to setup server: %w", err) + } + } + + if err := r.setup(); err != nil { + return nil, fmt.Errorf("failed to setup reconciler: %w", err) + } + + return r, nil +} + +// GetManager returns the controller manager. +func (r *ProviderReconciler) GetManager() ctrl.Manager { + return r.manager +} + +// setupServer initializes the HTTP server with schemas from the provider. 
func (r *ProviderReconciler) setupServer(p providerAdapter) error {
	registry := server.NewSchemaRegistry()

	// Schema registration is best-effort by capability: providers that do not
	// implement SchemaProvider simply get an empty registry (no error).
	if sp, ok := p.(SchemaProvider); ok {
		// Register component schemas (name -> Go spec type).
		for name, schemaType := range sp.ComponentSchemas() {
			if err := registry.RegisterComponent(name, schemaType); err != nil {
				return err
			}
		}

		// Register topologies: each topology contributes its own schema plus
		// the set of component names it supports.
		for name, def := range sp.Topologies() {
			if err := registry.RegisterTopology(name, def.Schema); err != nil {
				return err
			}
			// Extract component names from the definition. Map iteration order
			// is random, so the resulting slice order is unspecified.
			components := make([]string, 0, len(def.Components))
			for compName := range def.Components {
				components = append(components, compName)
			}
			registry.RegisterTopologyComponents(name, components)
		}

		// Register global schema, if the provider defines one.
		if globalSchema := sp.GlobalSchema(); globalSchema != nil {
			if err := registry.RegisterGlobal(globalSchema); err != nil {
				return err
			}
		}
	}

	// Validator closure given to the webhook: it wraps the provider's own
	// Validate method so the webhook and the reconcile loop share identical
	// validation logic. Metadata is attached to the context only when the
	// provider also implements MetadataProvider.
	validator := func(ctx context.Context, c client.Client, dc *v2alpha1.DataStore) error {
		// Create context handle with metadata if available
		var dsCtx *controller.Context
		if mp, ok := p.(controller.MetadataProvider); ok {
			metadata := mp.GetMetadata()
			dsCtx = controller.NewContextWithMetadata(ctx, c, dc, metadata)
		} else {
			dsCtx = controller.NewContext(ctx, c, dc)
		}
		return p.Validate(dsCtx)
	}

	// NOTE(review): r.serverConfig is dereferenced unconditionally; callers
	// must only invoke setupServer when WithServer was supplied (true for the
	// visible call site in newReconciler).
	r.server = server.NewServer(*r.serverConfig, registry, validator)
	return nil
}

// Start starts the reconciler and server (blocking).
func (r *ProviderReconciler) Start(ctx context.Context) error {
	// Start the HTTP server in the background if one was configured via
	// WithServer; server errors are logged but do not stop the manager.
	if r.server != nil {
		r.server.SetClient(r.Client)
		go func() {
			if err := r.server.Start(ctx); err != nil {
				log.FromContext(ctx).Error(err, "Server error")
			}
		}()
		// NOTE(review): readiness is flagged here, BEFORE r.manager.Start runs
		// below — the server reports ready while the manager (and its caches)
		// may still be starting. Confirm this early-ready behavior is intended.
		r.server.SetReady(true)
	}

	// Blocks until ctx is cancelled.
	return r.manager.Start(ctx)
}

// StartWithSignalHandler starts the reconciler and server with OS signal handling.
// It is identical to Start except the context comes from controller-runtime's
// SIGTERM/SIGINT signal handler.
func (r *ProviderReconciler) StartWithSignalHandler() error {
	ctx := ctrl.SetupSignalHandler()

	// Start server if configured (same background pattern as Start).
	if r.server != nil {
		r.server.SetClient(r.Client)
		go func() {
			if err := r.server.Start(ctx); err != nil {
				log.FromContext(ctx).Error(err, "Server error")
			}
		}()
		// NOTE(review): as in Start, readiness is set before the manager has
		// actually started — verify this is the desired probe semantics.
		r.server.SetReady(true)
	}

	return r.manager.Start(ctx)
}

// setup wires the DataStore controller into the manager: it watches DataStore
// objects belonging to this provider plus every type the provider owns, and
// registers r as the Reconciler.
func (r *ProviderReconciler) setup() error {
	// Filter to only handle DataStores for this provider; events for other
	// providers' DataStores are dropped before reaching Reconcile.
	filter := predicate.NewPredicateFuncs(func(object client.Object) bool {
		ds, ok := object.(*v2alpha1.DataStore)
		if !ok {
			return false
		}
		return ds.Spec.Provider == r.provider.Name()
	})

	b := ctrl.NewControllerManagedBy(r.manager).
		For(&v2alpha1.DataStore{}, builder.WithPredicates(filter)).
		Named(r.provider.Name() + "-controller")

	// Watch owned types so child-object changes requeue the parent DataStore.
	// Builder methods mutate the receiver, so the returned value can be ignored.
	for _, obj := range r.provider.OwnedTypes() {
		b.Owns(obj)
	}

	return b.Complete(r)
}

// Reconcile implements the reconciliation loop.
+func (r *ProviderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + logger := log.FromContext(ctx).WithValues("provider", r.provider.Name()) + + // Fetch the DataStore + ds := &v2alpha1.DataStore{} + if err := r.Client.Get(ctx, req.NamespacedName, ds); err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + // Create the Context handle with metadata if available + var dsCtx *controller.Context + if mp, ok := r.provider.(controller.MetadataProvider); ok { + metadata := mp.GetMetadata() + dsCtx = controller.NewContextWithMetadata(ctx, r.Client, ds, metadata) + } else { + dsCtx = controller.NewContext(ctx, r.Client, ds) + } + + // Handle deletion + if !ds.GetDeletionTimestamp().IsZero() { + return r.handleDeletion(ctx, dsCtx, ds, logger) + } + + // Ensure finalizer is present + if !controllerutil.ContainsFinalizer(ds, finalizerName) { + controllerutil.AddFinalizer(ds, finalizerName) + if err := r.Client.Update(ctx, ds); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{Requeue: true}, nil + } + + // Run validation + if err := r.provider.Validate(dsCtx); err != nil { + logger.Error(err, "Validation failed") + // Update status to failed + ds.Status.Phase = v2alpha1.DataStorePhaseFailed + if updateErr := r.Client.Status().Update(ctx, ds); updateErr != nil { + logger.Error(updateErr, "Failed to update status after validation error") + } + return reconcile.Result{}, err + } + + // Run sync + logger.Info("Running sync") + if err := r.provider.Sync(dsCtx); err != nil { + if controller.IsWaitError(err) { + logger.Info("Sync waiting", "reason", err.Error()) + return reconcile.Result{RequeueAfter: controller.GetWaitDuration(err)}, nil + } + logger.Error(err, "Sync failed") + return reconcile.Result{}, err + } + + // Compute and update status + logger.Info("Computing status") + status, err := r.provider.Status(dsCtx) + if err != nil { + logger.Error(err, "Status computation failed") + 
return reconcile.Result{}, err + } + + ds.Status = status.ToV2Alpha1() + if err := r.Client.Status().Update(ctx, ds); err != nil { + logger.Error(err, "Failed to update status") + return reconcile.Result{}, err + } + + logger.Info("Reconciliation complete", "phase", ds.Status.Phase) + return reconcile.Result{}, nil +} + +func (r *ProviderReconciler) handleDeletion( + ctx context.Context, + dsCtx *controller.Context, + ds *v2alpha1.DataStore, + logger interface{ Info(string, ...interface{}) }, +) (reconcile.Result, error) { + if !controllerutil.ContainsFinalizer(ds, finalizerName) { + return reconcile.Result{}, nil + } + + logger.Info("Running cleanup") + + // Update status to deleting + if ds.Status.Phase != v2alpha1.DataStorePhaseDeleting { + ds.Status.Phase = v2alpha1.DataStorePhaseDeleting + if err := r.Client.Status().Update(ctx, ds); err != nil { + return reconcile.Result{}, err + } + } + + // Run cleanup + if err := r.provider.Cleanup(dsCtx); err != nil { + if controller.IsWaitError(err) { + logger.Info("Cleanup waiting", "reason", err.Error()) + return reconcile.Result{RequeueAfter: controller.GetWaitDuration(err)}, nil + } + return reconcile.Result{}, err + } + + // Remove finalizer + controllerutil.RemoveFinalizer(ds, finalizerName) + if err := r.Client.Update(ctx, ds); err != nil { + return reconcile.Result{}, err + } + + logger.Info("Cleanup complete") + return reconcile.Result{}, nil +} diff --git a/pkg/server/schema.go b/pkg/server/schema.go new file mode 100644 index 0000000..eab77e9 --- /dev/null +++ b/pkg/server/schema.go @@ -0,0 +1,345 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/openapi3gen" +) + +// SchemaRegistry holds the registered schemas for components, topologies, and global config. +// It uses kin-openapi for OpenAPI 3.0 schema generation from Go types. +type SchemaRegistry struct { + mu sync.RWMutex + + // Components maps component name to its schema (e.g., "mongod", "mongos", "cfg") + Components map[string]*openapi3.SchemaRef + + // Topologies maps topology name to its schema (e.g., "replicaset", "sharded") + Topologies map[string]*openapi3.SchemaRef + + // TopologyComponents maps topology name to list of supported component names + TopologyComponents map[string][]string + + // Global is the schema for global/cluster-wide configuration + Global *openapi3.SchemaRef +} + +// NewSchemaRegistry creates a new SchemaRegistry. +func NewSchemaRegistry() *SchemaRegistry { + return &SchemaRegistry{ + Components: make(map[string]*openapi3.SchemaRef), + Topologies: make(map[string]*openapi3.SchemaRef), + TopologyComponents: make(map[string][]string), + } +} + +// defaultSchemaCustomizer parses common OpenAPI-related struct tags and applies them to the schema. 
// Supported tags:
//   - description: Schema description
//   - enum: Comma-separated list of allowed values (e.g., `enum:"a,b,c"`)
//   - default: Default value
//   - example: Example value
//   - minimum: Minimum value for numbers
//   - maximum: Maximum value for numbers
//   - minLength: Minimum length for strings
//   - maxLength: Maximum length for strings
//
// The name and t parameters are unused here but required by the
// openapi3gen.SchemaCustomizer callback signature. This customizer never
// returns an error.
func defaultSchemaCustomizer(name string, t reflect.Type, tag reflect.StructTag, schema *openapi3.Schema) error {
	// Description
	if desc := tag.Get("description"); desc != "" {
		schema.Description = desc
	}

	// Enum values: always stored as trimmed strings, regardless of the
	// schema's declared type.
	if enumStr := tag.Get("enum"); enumStr != "" {
		values := strings.Split(enumStr, ",")
		schema.Enum = make([]any, len(values))
		for i, v := range values {
			schema.Enum[i] = strings.TrimSpace(v)
		}
	}

	// Default value (parsed into bool/int/float based on schema type; falls
	// back to the raw string — see parseValue).
	if defaultStr := tag.Get("default"); defaultStr != "" {
		schema.Default = parseValue(defaultStr, schema)
	}

	// Example value (same parsing rules as default).
	if exampleStr := tag.Get("example"); exampleStr != "" {
		schema.Example = parseValue(exampleStr, schema)
	}

	// Minimum (for numbers); unparseable values are silently ignored.
	if minStr := tag.Get("minimum"); minStr != "" {
		if val, err := strconv.ParseFloat(minStr, 64); err == nil {
			schema.Min = &val
		}
	}

	// Maximum (for numbers); unparseable values are silently ignored.
	if maxStr := tag.Get("maximum"); maxStr != "" {
		if val, err := strconv.ParseFloat(maxStr, 64); err == nil {
			schema.Max = &val
		}
	}

	// MinLength (for strings); kin-openapi stores MinLength as a plain uint64.
	if minLenStr := tag.Get("minLength"); minLenStr != "" {
		if val, err := strconv.ParseUint(minLenStr, 10, 64); err == nil {
			schema.MinLength = val
		}
	}

	// MaxLength (for strings); stored as *uint64, hence the local copy whose
	// address is taken.
	if maxLenStr := tag.Get("maxLength"); maxLenStr != "" {
		if val, err := strconv.ParseUint(maxLenStr, 10, 64); err == nil {
			maxLen := val
			schema.MaxLength = &maxLen
		}
	}

	return nil
}

// parseValue attempts to parse a string value based on the schema type.
// parseValue converts s into a Go value matching the schema's declared type:
// bool for "boolean", int64 for "integer", float64 for "number". If the type
// is unset, unrecognized, or s fails to parse, the raw string is returned
// unchanged (no error is surfaced).
func parseValue(s string, schema *openapi3.Schema) any {
	if schema.Type == nil {
		return s
	}

	switch {
	case schema.Type.Is("boolean"):
		if v, err := strconv.ParseBool(s); err == nil {
			return v
		}
	case schema.Type.Is("integer"):
		if v, err := strconv.ParseInt(s, 10, 64); err == nil {
			return v
		}
	case schema.Type.Is("number"):
		if v, err := strconv.ParseFloat(s, 64); err == nil {
			return v
		}
	}
	// Fallback for string types, unknown types, and parse failures.
	return s
}

// generateSchema generates an OpenAPI schema for a Go type with custom tag support.
// It reflects over typ's value using kin-openapi's generator, applying
// defaultSchemaCustomizer so the struct tags documented above take effect.
func generateSchema(typ any) (*openapi3.SchemaRef, error) {
	return openapi3gen.NewSchemaRefForValue(
		typ,
		nil, // no shared schemas map; refs are inlined into the result
		openapi3gen.SchemaCustomizer(defaultSchemaCustomizer),
	)
}

// RegisterComponent registers a component type schema.
// The type should be the Go struct that represents the component's custom spec.
// Registering the same name twice silently overwrites the earlier schema.
// Example: registry.RegisterComponent("mongod", MongodCustomSpec{})
func (r *SchemaRegistry) RegisterComponent(name string, typ any) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	schemaRef, err := generateSchema(typ)
	if err != nil {
		return fmt.Errorf("failed to generate schema for component %q: %w", name, err)
	}
	r.Components[name] = schemaRef
	return nil
}

// RegisterTopology registers a topology type schema.
// Registering the same name twice silently overwrites the earlier schema.
// Example: registry.RegisterTopology("replicaset", ReplicaSetTopologyConfig{})
func (r *SchemaRegistry) RegisterTopology(name string, typ any) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	schemaRef, err := generateSchema(typ)
	if err != nil {
		return fmt.Errorf("failed to generate schema for topology %q: %w", name, err)
	}
	r.Topologies[name] = schemaRef
	return nil
}

// RegisterGlobal registers the global configuration schema.
+// Example: registry.RegisterGlobal(GlobalConfig{})
+func (r *SchemaRegistry) RegisterGlobal(typ any) error {
+	// Generate outside the lock: schema generation never touches the registry.
+	ref, err := generateSchema(typ)
+	if err != nil {
+		return fmt.Errorf("failed to generate schema for global config: %w", err)
+	}
+
+	r.mu.Lock()
+	r.Global = ref
+	r.mu.Unlock()
+	return nil
+}
+
+// RegisterTopologyComponents registers which components are supported by a topology.
+// This is used for validation and documentation purposes.
+// Example: registry.RegisterTopologyComponents("replicaset", []string{"engine", "backupAgent", "monitoring"})
+func (r *SchemaRegistry) RegisterTopologyComponents(topologyName string, components []string) {
+	r.mu.Lock()
+	r.TopologyComponents[topologyName] = components
+	r.mu.Unlock()
+}
+
+// MustRegisterComponent is like RegisterComponent but panics on error.
+func (r *SchemaRegistry) MustRegisterComponent(name string, typ any) {
+	err := r.RegisterComponent(name, typ)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// MustRegisterTopology is like RegisterTopology but panics on error.
+func (r *SchemaRegistry) MustRegisterTopology(name string, typ any) {
+	err := r.RegisterTopology(name, typ)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// MustRegisterGlobal is like RegisterGlobal but panics on error.
+func (r *SchemaRegistry) MustRegisterGlobal(typ any) {
+	err := r.RegisterGlobal(typ)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// GetComponentSchema returns the schema for a component by name.
+// The boolean reports whether the component is registered.
+func (r *SchemaRegistry) GetComponentSchema(name string) (*openapi3.SchemaRef, bool) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	ref, ok := r.Components[name]
+	return ref, ok
+}
+
+// GetTopologySchema returns the schema for a topology by name.
+// The boolean reports whether the topology is registered.
+func (r *SchemaRegistry) GetTopologySchema(name string) (*openapi3.SchemaRef, bool) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	ref, ok := r.Topologies[name]
+	return ref, ok
+}
+
+// GetGlobalSchema returns the global configuration schema.
+// The boolean reports whether a global schema has been registered.
+func (r *SchemaRegistry) GetGlobalSchema() (*openapi3.SchemaRef, bool) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	g := r.Global
+	return g, g != nil
+}
+
+// GetTopologyComponents returns the list of components supported by a topology.
+// The boolean reports whether the topology has a component list registered.
+func (r *SchemaRegistry) GetTopologyComponents(topologyName string) ([]string, bool) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	list, ok := r.TopologyComponents[topologyName]
+	return list, ok
+}
+
+// ListComponents returns all registered component names (unsorted map order).
+func (r *SchemaRegistry) ListComponents() []string {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	out := make([]string, 0, len(r.Components))
+	for k := range r.Components {
+		out = append(out, k)
+	}
+	return out
+}
+
+// ListTopologies returns all registered topology names (unsorted map order).
+func (r *SchemaRegistry) ListTopologies() []string {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	out := make([]string, 0, len(r.Topologies))
+	for k := range r.Topologies {
+		out = append(out, k)
+	}
+	return out
+}
+
+// AllSchemas returns a complete OpenAPI 3.0 document containing all registered schemas.
+// This is useful for documentation and can be served at an endpoint.
+func (r *SchemaRegistry) AllSchemas() *openapi3.T {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	doc := &openapi3.T{
+		OpenAPI: "3.0.3",
+		Info: &openapi3.Info{
+			Title:       "Provider Configuration Schemas",
+			Version:     "1.0.0",
+			Description: "OpenAPI schemas for provider component, topology, and global configurations",
+		},
+		Components: &openapi3.Components{
+			Schemas: make(openapi3.Schemas),
+		},
+	}
+
+	// Add component schemas, namespaced to avoid collisions with topologies.
+	for name, schema := range r.Components {
+		doc.Components.Schemas["component."+name] = schema
+	}
+
+	// Add topology schemas
+	for name, schema := range r.Topologies {
+		doc.Components.Schemas["topology."+name] = schema
+	}
+
+	// Add global schema
+	if r.Global != nil {
+		doc.Components.Schemas["global"] = r.Global
+	}
+
+	// Add topology components information as extension.
+	// Consistency fix: use `any` like the rest of this file, not interface{}.
+	if len(r.TopologyComponents) > 0 {
+		doc.Extensions = make(map[string]any)
+		doc.Extensions["x-topology-components"] = r.TopologyComponents
+	}
+
+	return doc
+}
+
+// GenerateSchema generates an OpenAPI schema for any Go type using kin-openapi.
+// This is a convenience function for one-off schema generation.
+// It supports the following struct tags for schema customization:
+// - description: Schema description
+// - enum: Comma-separated list of allowed values
+// - default: Default value
+// - example: Example value
+// - minimum/maximum: Numeric bounds
+// - minLength/maxLength: String length bounds
+func GenerateSchema(typ any) (*openapi3.SchemaRef, error) {
+	return generateSchema(typ)
+}
+
+// MustGenerateSchema is like GenerateSchema but panics on error.
+// Intended for package-level initialization of static schemas, where a
+// generation failure is a programming error.
+func MustGenerateSchema(typ any) *openapi3.SchemaRef {
+	schema, err := GenerateSchema(typ)
+	if err != nil {
+		panic(err)
+	}
+	return schema
+}
+
diff --git a/pkg/server/server.go b/pkg/server/server.go
new file mode 100644
index 0000000..9cb21ea
--- /dev/null
+++ b/pkg/server/server.go
@@ -0,0 +1,306 @@
+package server
+
+// Provider HTTP Server
+//
+// This file implements the HTTP server that providers run alongside their controller.
+// The server exposes:
+//
+// 1. Schema Endpoint (/schema) - Returns OpenAPI schemas for components, topologies, global config
+// 2. Validation Webhook (/validate) - Accepts admission review requests and validates DataStores
+// 3. Health Endpoint (/healthz) - Kubernetes health check
+// 4. Ready Endpoint (/readyz) - Kubernetes readiness check
+//
+// The server is integrated with the reconciler and runs in the same process.
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/openeverest/provider-sdk/pkg/apis/v2alpha1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// =============================================================================
+// SERVER CONFIGURATION
+// =============================================================================
+
+// ServerConfig configures the provider HTTP server.
+// Zero-valued fields are replaced with defaults by NewServer.
+type ServerConfig struct {
+	// Port is the port to listen on (default: 8080)
+	Port int
+
+	// SchemaPath is the path for the schema endpoint (default: /schema)
+	SchemaPath string
+
+	// ValidationPath is the path for the validation webhook (default: /validate)
+	ValidationPath string
+
+	// HealthPath is the path for health checks (default: /healthz)
+	HealthPath string
+
+	// ReadyPath is the path for readiness checks (default: /readyz)
+	ReadyPath string
+
+	// ReadTimeout is the maximum duration for reading the entire request (default: 10s)
+	ReadTimeout time.Duration
+
+	// WriteTimeout is the maximum duration before timing out writes (default: 10s)
+	WriteTimeout time.Duration
+}
+
+// DefaultServerConfig returns a ServerConfig with sensible defaults.
+func DefaultServerConfig() ServerConfig {
+	return ServerConfig{
+		Port:           8080,
+		SchemaPath:     "/schema",
+		ValidationPath: "/validate",
+		HealthPath:     "/healthz",
+		ReadyPath:      "/readyz",
+		ReadTimeout:    10 * time.Second,
+		WriteTimeout:   10 * time.Second,
+	}
+}
+
+// =============================================================================
+// PROVIDER SERVER
+// =============================================================================
+
+// ValidatorFunc is a function that validates a DataStore.
+// It receives the context, a Kubernetes client (for fetching related resources),
+// and the DataStore to validate.
+// Return nil if validation passes, or an error with a user-friendly message.
+type ValidatorFunc func(ctx context.Context, c client.Client, dc *v2alpha1.DataStore) error
+
+// Server is the HTTP server for a provider.
+// config, registry, and validator are set once at construction; client and
+// ready are mutated at runtime (SetClient/SetReady) and are guarded by mu.
+type Server struct {
+	config    ServerConfig
+	registry  *SchemaRegistry
+	validator ValidatorFunc
+	client    client.Client
+
+	// server is created lazily in Start.
+	server *http.Server
+	// ready and client are read/written under mu.
+	ready bool
+	mu    sync.RWMutex
+}
+
+// NewServer creates a new provider server.
+// Zero-valued fields of config are replaced with the same defaults that
+// DefaultServerConfig returns.
+func NewServer(config ServerConfig, registry *SchemaRegistry, validator ValidatorFunc) *Server {
+	if config.Port == 0 {
+		config.Port = 8080
+	}
+	if config.SchemaPath == "" {
+		config.SchemaPath = "/schema"
+	}
+	if config.ValidationPath == "" {
+		config.ValidationPath = "/validate"
+	}
+	if config.HealthPath == "" {
+		config.HealthPath = "/healthz"
+	}
+	if config.ReadyPath == "" {
+		config.ReadyPath = "/readyz"
+	}
+	// Consistency fix: the timeouts were the only fields NOT defaulted here,
+	// so a zero-value config produced an http.Server without read/write
+	// timeouts. Default them to match DefaultServerConfig.
+	if config.ReadTimeout == 0 {
+		config.ReadTimeout = 10 * time.Second
+	}
+	if config.WriteTimeout == 0 {
+		config.WriteTimeout = 10 * time.Second
+	}
+
+	return &Server{
+		config:    config,
+		registry:  registry,
+		validator: validator,
+	}
+}
+
+// SetClient sets the Kubernetes client (called by reconciler after manager is ready).
+func (s *Server) SetClient(c client.Client) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.client = c
+}
+
+// SetReady marks the server as ready to serve traffic.
+func (s *Server) SetReady(ready bool) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.ready = ready
+}
+
+// Start starts the HTTP server (blocking).
+// It returns when ctx is cancelled (after a graceful shutdown with a 5s
+// deadline) or when the listener fails.
+func (s *Server) Start(ctx context.Context) error {
+	mux := http.NewServeMux()
+
+	// Register endpoints
+	mux.HandleFunc(s.config.SchemaPath, s.handleSchema)
+	mux.HandleFunc(s.config.ValidationPath, s.handleValidation)
+	mux.HandleFunc(s.config.HealthPath, s.handleHealth)
+	mux.HandleFunc(s.config.ReadyPath, s.handleReady)
+
+	s.server = &http.Server{
+		Addr:         fmt.Sprintf(":%d", s.config.Port),
+		Handler:      mux,
+		ReadTimeout:  s.config.ReadTimeout,
+		WriteTimeout: s.config.WriteTimeout,
+	}
+
+	logger := log.FromContext(ctx)
+	logger.Info("Starting provider server",
+		"port", s.config.Port,
+		"schemaPath", s.config.SchemaPath,
+		"validationPath", s.config.ValidationPath,
+	)
+
+	// Start server in goroutine; ErrServerClosed is the expected result of
+	// Shutdown and is not treated as a failure.
+	errCh := make(chan error, 1)
+	go func() {
+		if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+			errCh <- err
+		}
+	}()
+
+	// Wait for context cancellation or error
+	select {
+	case <-ctx.Done():
+		logger.Info("Shutting down provider server")
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+		return s.server.Shutdown(shutdownCtx)
+	case err := <-errCh:
+		return err
+	}
+}
+
+// =============================================================================
+// HTTP HANDLERS
+// =============================================================================
+
+// handleSchema serves the OpenAPI schema for all registered types.
+func (s *Server) handleSchema(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodGet {
+		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	// Get the OpenAPI document with all schemas
+	doc := s.registry.AllSchemas()
+
+	// Marshal to JSON
+	data, err := doc.MarshalJSON()
+	if err != nil {
+		http.Error(w, fmt.Sprintf("Failed to generate schema: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	// Best-effort: a write failure means the client went away; there is
+	// nothing useful to do with the error at this point.
+	_, _ = w.Write(data)
+}
+
+// handleValidation handles validation webhook requests.
+// It expects a ValidationRequest and returns a ValidationResponse.
+// Denials (including parse failures and a not-yet-initialized client) are
+// reported as HTTP 200 with Allowed=false; only a wrong HTTP method yields a
+// non-200 status. A nil validator allows every request.
+func (s *Server) handleValidation(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	// Parse request
+	var req ValidationRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		s.writeValidationResponse(w, &ValidationResponse{
+			Allowed: false,
+			Message: fmt.Sprintf("Failed to parse request: %v", err),
+		})
+		return
+	}
+
+	// Get client (snapshot under the read lock; SetClient may race otherwise)
+	s.mu.RLock()
+	c := s.client
+	s.mu.RUnlock()
+
+	if c == nil {
+		s.writeValidationResponse(w, &ValidationResponse{
+			Allowed: false,
+			Message: "Server not ready: client not initialized",
+		})
+		return
+	}
+
+	// Run validation (skipped entirely when no validator was provided)
+	ctx := r.Context()
+	var validationErr error
+	if s.validator != nil {
+		validationErr = s.validator(ctx, c, &req.Object)
+	}
+
+	// Return response: the validator's error text is surfaced verbatim as the
+	// denial message.
+	if validationErr != nil {
+		s.writeValidationResponse(w, &ValidationResponse{
+			Allowed: false,
+			Message: validationErr.Error(),
+		})
+		return
+	}
+
+	s.writeValidationResponse(w, &ValidationResponse{
+		Allowed: true,
+	})
+}
+
+// writeValidationResponse serializes resp as JSON with an implicit 200 status.
+// Encoding errors are ignored (best-effort; headers are already committed).
+func (s *Server) writeValidationResponse(w http.ResponseWriter, resp *ValidationResponse) {
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(resp)
+}
+
+// handleHealth returns 200 if the server is running.
+// The request is unused; the signature is dictated by http.HandleFunc.
+func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+	w.Write([]byte("ok"))
+}
+
+// handleReady returns 200 if the server is ready to serve traffic.
+// Readiness is the flag toggled via SetReady; it returns 503 until then.
+func (s *Server) handleReady(w http.ResponseWriter, r *http.Request) {
+	s.mu.RLock()
+	ready := s.ready
+	s.mu.RUnlock()
+
+	if ready {
+		w.WriteHeader(http.StatusOK)
+		w.Write([]byte("ok"))
+	} else {
+		w.WriteHeader(http.StatusServiceUnavailable)
+		w.Write([]byte("not ready"))
+	}
+}
+
+// =============================================================================
+// VALIDATION REQUEST/RESPONSE TYPES
+// =============================================================================
+
+// ValidationRequest is the request body for the validation webhook.
+// This is a simplified version - in production you might use Kubernetes
+// admission review types directly.
+type ValidationRequest struct {
+	// Object is the DataStore being validated
+	Object v2alpha1.DataStore `json:"object"`
+
+	// OldObject is the existing DataStore (for UPDATE operations)
+	// May be nil for CREATE operations
+	OldObject *v2alpha1.DataStore `json:"oldObject,omitempty"`
+
+	// Operation is the operation being performed (CREATE, UPDATE, DELETE)
+	Operation string `json:"operation,omitempty"`
+}
+
+// ValidationResponse is the response body for the validation webhook.
+type ValidationResponse struct {
+	// Allowed indicates whether the request is allowed
+	Allowed bool `json:"allowed"`
+
+	// Message is the reason for denial (if not allowed)
+	Message string `json:"message,omitempty"`
+
+	// Warnings are non-blocking warnings to return to the user
+	// NOTE(review): nothing in this file populates Warnings yet — confirm it
+	// is wired up by validator implementations before relying on it.
+	Warnings []string `json:"warnings,omitempty"`
+}