diff --git a/.gitignore b/.gitignore index c71ab73..8cf5c3b 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,9 @@ coverage.* *.coverprofile profile.cov +# Profiling artifacts +dev/profiles/ + # Dependency directories (remove the comment below to include it) # vendor/ diff --git a/README.md b/README.md index 22dbb0a..12236f3 100644 --- a/README.md +++ b/README.md @@ -147,29 +147,14 @@ keyFunc := func(obj runtime.Object) (string, error) { } ``` -#### Admission: server-owned cluster label -We stamp a server-owned cluster label on persisted objects and validate it on -create/update to prevent cross-cluster writes and support watchcache keying. +#### Admission: internal cluster identity +We store cluster identity in managed fields and validate it on create/update +to prevent cross-cluster writes and support watchcache keying. ```go -lbls := accessor.GetLabels() -if lbls == nil { - lbls = map[string]string{} -} -key := m.Options.ClusterAnnotationKey -if key == "" { - key = mcv1.DefaultClusterAnnotation -} -lbls[key] = cid -accessor.SetLabels(lbls) -``` -Note: the `ClusterAnnotationKey` fallback to `DefaultClusterAnnotation` is -temporary and not ideal. It exists to preserve compatibility while we migrate -callers to an explicit label key; avoid relying on this fallback long term. 
- -```go -if cid := acc.GetLabels()[key]; cid != reqCID { - return fmt.Errorf("cluster label %q=%q must match request cluster %q", key, cid, reqCID) +mc.SetObjectClusterIdentity(obj, reqCID) +if cid := mc.ObjectClusterIdentity(obj); cid != reqCID { + return fmt.Errorf("cluster identity %q must match request cluster %q", cid, reqCID) } ``` diff --git a/cmd/apiserver/app/config.go b/cmd/apiserver/app/config.go index 5c2f474..fdeb4c1 100644 --- a/cmd/apiserver/app/config.go +++ b/cmd/apiserver/app/config.go @@ -127,7 +127,8 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { mcOpts.DefaultCluster = opts.RootControlPlaneName } clientPool := mc.NewClientPool(genericConfig.LoopbackClientConfig, mcOpts.PathPrefix, mcOpts.ControlPlaneSegment) - informerPool := mc.NewInformerPoolFromClientPool(clientPool, 0, genericConfig.DrainedNotify()) + informerRegistry := mc.NewInformerRegistry(wait.ContextForChannel(genericConfig.DrainedNotify())) + mcOpts.InformerRegistry = informerRegistry var crdRuntimeMgr *mcbootstrap.CRDRuntimeManager systemNamespaceBootstrapper := mcbootstrap.NewSystemNamespaceBootstrapper(mcbootstrap.SystemNamespaceOptions{ ClientForCluster: clientPool.KubeClientForCluster, @@ -146,7 +147,7 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { }) genericConfig.BuildHandlerChainFunc = func(h http.Handler, conf *server.Config) http.Handler { ex := mc.PathExtractor{PathPrefix: mcOpts.PathPrefix, ControlPlaneSegment: mcOpts.ControlPlaneSegment} - base := server.DefaultBuildHandlerChain(h, conf) + base := withVersionOverride(server.DefaultBuildHandlerChain(h, conf)) dispatch := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cid, _, _ := mc.FromContext(r.Context()) if cid != "" && cid != mcOpts.DefaultCluster && crdRuntimeMgr != nil { @@ -162,10 +163,7 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { return } if h, err := crdRuntimeMgr.Runtime(cid, genericConfig.DrainedNotify()); err == nil && h != nil 
{ - h = genericfilters.WithRequestInfo(h, conf.RequestInfoResolver) - h = genericfilters.WithAuditInit(h) - h = serverfilters.WithPanicRecovery(h, conf.RequestInfoResolver) - h.ServeHTTP(w, r) + wrapClusterCRDHandler(h, conf, cid, false).ServeHTTP(w, r) return } klog.Errorf("mc.crdRuntime unresolved at kube cluster=%s path=%s", cid, r.URL.Path) @@ -187,13 +185,15 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { EgressSelector: genericConfig.EgressSelector, APIServerID: genericConfig.APIServerID, ClientPool: clientPool, - InformerPool: informerPool, + InformerRegistry: informerRegistry, }) if genericConfig.Authentication.Authenticator != nil { genericConfig.Authentication.Authenticator = mcauth.NewClusterAuthenticator(mcOpts.DefaultCluster, genericConfig.Authentication.Authenticator, authManager) } if genericConfig.Authorization.Authorizer != nil { - clusterAuthorizer := mcauth.NewClusterAuthorizer(mcOpts.DefaultCluster, genericConfig.Authorization.Authorizer, genericConfig.RuleResolver, authManager) + // Route root and tenant authorization through the same multicluster + // manager-backed path to avoid root-only stale lister divergence. 
+ clusterAuthorizer := mcauth.NewClusterAuthorizer(mcOpts.DefaultCluster, nil, nil, authManager) genericConfig.Authorization.Authorizer = clusterAuthorizer genericConfig.RuleResolver = clusterAuthorizer } @@ -209,7 +209,7 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { PathPrefix: mcOpts.PathPrefix, ControlPlaneSegment: mcOpts.ControlPlaneSegment, ClientPool: clientPool, - InformerPool: informerPool, + InformerRegistry: informerRegistry, }) mcNamespaceLifecycle := mcnsl.NewLifecycle(mcOpts, mcNamespaceMgr) @@ -239,11 +239,7 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { targetPort = opts.SecureServing.BindPort } stopChForCluster := func(clusterID string) (<-chan struct{}, error) { - _, _, stopCh, err := informerPool.Get(clusterID) - if err != nil { - return nil, err - } - return stopCh, nil + return genericConfig.DrainedNotify(), nil } internalControllerMgr := mcbootstrap.NewInternalControllerManager(mcbootstrap.InternalControllerOptions{ ClientForCluster: clientPool.KubeClientForCluster, @@ -291,7 +287,7 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { ControlPlaneSegment: mcOpts.ControlPlaneSegment, CelRuntime: celRuntime, ClientPool: clientPool, - InformerPool: informerPool, + InformerRegistry: informerRegistry, }) mcMutatingWebhook := mcwh.NewMutating(mcOpts, mcWebhookMgr) mcValidatingWebhook := mcwh.NewValidating(mcOpts, mcWebhookMgr) @@ -317,14 +313,15 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { if apiExtensions.GenericConfig.RESTOptionsGetter != nil { apiExtensions.GenericConfig.RESTOptionsGetter = decorateRESTOptionsGetter("apiextensions", apiExtensions.GenericConfig.RESTOptionsGetter, mcOpts) } - apiExtensionsClientPool := mc.NewAPIExtensionsClientPool(apiExtensions.GenericConfig.LoopbackClientConfig, mcOpts.PathPrefix, mcOpts.ControlPlaneSegment) - apiExtensionsInformerPool := mc.NewAPIExtensionsInformerPoolFromClientPool(apiExtensionsClientPool, 0, 
genericConfig.DrainedNotify()) + if apiExtensions.ExtraConfig.CRDRESTOptionsGetter != nil { + apiExtensions.ExtraConfig.CRDRESTOptionsGetter = decorateRESTOptionsGetter("apiextensions-crd", apiExtensions.ExtraConfig.CRDRESTOptionsGetter, mcOpts) + } crdRuntimeMgr = mcbootstrap.NewCRDRuntimeManager(mcbootstrap.CRDRuntimeManagerOptions{ - BaseAPIExtensionsConfig: apiExtensions, - APIExtensionsInformerPool: apiExtensionsInformerPool, - PathPrefix: mcOpts.PathPrefix, - ControlPlaneSegment: mcOpts.ControlPlaneSegment, - DefaultCluster: mcOpts.DefaultCluster, + BaseAPIExtensionsConfig: apiExtensions, + InformerRegistry: informerRegistry, + PathPrefix: mcOpts.PathPrefix, + ControlPlaneSegment: mcOpts.ControlPlaneSegment, + DefaultCluster: mcOpts.DefaultCluster, }) setCRDRequestResolvers(apiExtensions, crdRuntimeMgr.CRDGetterForRequest, crdRuntimeMgr.CRDListerForRequest) crdController := mcbootstrap.NewMulticlusterCRDController(crdRuntimeMgr, mcOpts.DefaultCluster) @@ -361,19 +358,13 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { http.Error(w, "cluster CRD runtime unavailable", http.StatusServiceUnavailable) return true } - // Ensure RequestInfo is computed from the normalized /apis path - // before entering the cluster-scoped CRD runtime handler. 
- h = genericfilters.WithRequestInfo(h, conf.RequestInfoResolver) - h = withClusterCRDRequestInfoRewrite(h, clusterID) - h = genericfilters.WithAuditInit(h) - h = serverfilters.WithPanicRecovery(h, conf.RequestInfoResolver) - h.ServeHTTP(w, r) + wrapClusterCRDHandler(h, conf, clusterID, false).ServeHTTP(w, r) return true } // Ensure CRDs are also routed through the multicluster handler apiExtensions.GenericConfig.BuildHandlerChainFunc = func(h http.Handler, conf *server.Config) http.Handler { ex := mc.PathExtractor{PathPrefix: mcOpts.PathPrefix, ControlPlaneSegment: mcOpts.ControlPlaneSegment} - base := server.DefaultBuildHandlerChain(h, conf) + base := withVersionOverride(server.DefaultBuildHandlerChain(h, conf)) dispatch := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cid, _, _ := mc.FromContext(r.Context()) if cid != "" && cid != mcOpts.DefaultCluster { @@ -390,11 +381,11 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { mut := mca.NewMutating(mcOpts) val := mca.NewValidating(mcOpts) base := apiExtensions.GenericConfig.AdmissionControl - chain := []admission.Interface{mut, mcNamespaceLifecycle, mcMutatingWebhook} + chain := []admission.Interface{mut, mcNamespaceLifecycle} if base != nil { chain = append(chain, base) } - chain = append(chain, mcValidatingWebhook, val) + chain = append(chain, val) apiExtensions.GenericConfig.AdmissionControl = admission.NewChainHandler(chain...) 
} c.ApiExtensions = apiExtensions @@ -409,7 +400,7 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { // Ensure aggregator also receives multicluster routing aggregator.GenericConfig.BuildHandlerChainFunc = func(h http.Handler, conf *server.Config) http.Handler { ex := mc.PathExtractor{PathPrefix: mcOpts.PathPrefix, ControlPlaneSegment: mcOpts.ControlPlaneSegment} - base := server.DefaultBuildHandlerChain(h, conf) + base := withVersionOverride(server.DefaultBuildHandlerChain(h, conf)) dispatch := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cid, _, _ := mc.FromContext(r.Context()) if cid != "" && cid != mcOpts.DefaultCluster && crdRuntimeMgr != nil { @@ -426,11 +417,11 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { mut := mca.NewMutating(mcOpts) val := mca.NewValidating(mcOpts) base := aggregator.GenericConfig.AdmissionControl - chain := []admission.Interface{mut, mcNamespaceLifecycle, mcMutatingWebhook} + chain := []admission.Interface{mut, mcNamespaceLifecycle} if base != nil { chain = append(chain, base) } - chain = append(chain, mcValidatingWebhook, val) + chain = append(chain, val) aggregator.GenericConfig.AdmissionControl = admission.NewChainHandler(chain...) } c.Aggregator = aggregator @@ -480,6 +471,25 @@ func withClusterCRDRequestInfoRewrite(next http.Handler, clusterID string) http. 
}) } +func wrapClusterCRDHandler(next http.Handler, conf *server.Config, clusterID string, rewriteRequestInfo bool) http.Handler { + if next == nil || conf == nil { + return next + } + h := next + if rewriteRequestInfo { + h = withClusterCRDRequestInfoRewrite(h, clusterID) + } + h = genericfilters.WithAuthorization(h, conf.Authorization.Authorizer, conf.Serializer) + failedHandler := genericfilters.Unauthorized(conf.Serializer) + failedHandler = genericfilters.WithFailedAuthenticationAudit(failedHandler, conf.AuditBackend, conf.AuditPolicyRuleEvaluator) + h = genericfilters.WithAuthentication(h, conf.Authentication.Authenticator, failedHandler, conf.Authentication.APIAudiences, conf.Authentication.RequestHeaderConfig) + // RequestInfo must be available before rewrite/authn/authz wrappers execute. + h = genericfilters.WithRequestInfo(h, conf.RequestInfoResolver) + h = genericfilters.WithAuditInit(h) + h = serverfilters.WithPanicRecovery(h, conf.RequestInfoResolver) + return h +} + func decorateRESTOptionsGetter(server string, getter generic.RESTOptionsGetter, opts mc.Options) generic.RESTOptionsGetter { if _, ok := getter.(mc.RESTOptionsDecorator); ok { klog.Infof("mc.restOptionsGetter server=%s alreadyDecorated=true", server) diff --git a/cmd/apiserver/app/server.go b/cmd/apiserver/app/server.go index f30bcf1..ae5f809 100644 --- a/cmd/apiserver/app/server.go +++ b/cmd/apiserver/app/server.go @@ -49,9 +49,8 @@ import ( logsapi "k8s.io/component-base/logs/api/v1" _ "k8s.io/component-base/metrics/prometheus/workqueue" "k8s.io/component-base/term" - utilversion "k8s.io/component-base/version" "k8s.io/component-base/version/verflag" - "k8s.io/component-base/zpages/flagz" + "k8s.io/apiserver/pkg/server/flagz" "k8s.io/klog/v2" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" "k8s.io/kubernetes/pkg/capabilities" @@ -148,7 +147,7 @@ cluster's shared state through which all other components interact.`, // Run runs the specified APIServer. This should never exit. 
func Run(ctx context.Context, opts options.CompletedOptions) error { // To help debugging, immediately log version - klog.Infof("Version: %+v", utilversion.Get()) + klog.Infof("Version: %+v", normalizedServerVersion()) klog.InfoS("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK")) diff --git a/cmd/apiserver/app/version_override.go b/cmd/apiserver/app/version_override.go new file mode 100644 index 0000000..a6b3843 --- /dev/null +++ b/cmd/apiserver/app/version_override.go @@ -0,0 +1,51 @@ +package app + +import ( + "encoding/json" + "net/http" + "strings" + + apimachineryversion "k8s.io/apimachinery/pkg/version" + utilversion "k8s.io/component-base/version" +) + +const gitVersionArchivePlaceholder = "v0.0.0-master+$Format:%H$" + +func withVersionOverride(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if isVersionPath(r.URL.Path) { + info := normalizedServerVersion() + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(info) + return + } + next.ServeHTTP(w, r) + }) +} + +func isVersionPath(path string) bool { + if path == "/version" || path == "/version/" { + return true + } + trimmed := strings.Trim(path, "/") + parts := strings.Split(trimmed, "/") + return len(parts) == 3 && parts[0] == "clusters" && parts[2] == "version" +} + +func normalizedServerVersion() apimachineryversion.Info { + info := utilversion.Get() + if info.GitVersion != gitVersionArchivePlaceholder { + return info + } + + parts := strings.SplitN(utilversion.DefaultKubeBinaryVersion, ".", 2) + if len(parts) != 2 { + return info + } + + info.Major = parts[0] + info.Minor = parts[1] + info.GitVersion = "v" + utilversion.DefaultKubeBinaryVersion + ".0" + return info +} + diff --git a/cmd/apiserver/app/version_override_test.go b/cmd/apiserver/app/version_override_test.go new file mode 100644 index 0000000..134a55f --- /dev/null +++ 
b/cmd/apiserver/app/version_override_test.go @@ -0,0 +1,30 @@ +package app + +import "testing" + +func TestIsVersionPath(t *testing.T) { + tests := []struct { + path string + want bool + }{ + {path: "/version", want: true}, + {path: "/version/", want: true}, + {path: "/clusters/kpt-1/version", want: true}, + {path: "/clusters/kpt-1/version/", want: true}, + {path: "/apis", want: false}, + } + + for _, tt := range tests { + if got := isVersionPath(tt.path); got != tt.want { + t.Fatalf("isVersionPath(%q)=%v, want %v", tt.path, got, tt.want) + } + } +} + +func TestNormalizedServerVersion(t *testing.T) { + info := normalizedServerVersion() + if info.GitVersion == gitVersionArchivePlaceholder { + t.Fatalf("expected non-placeholder git version, got %q", info.GitVersion) + } +} + diff --git a/docs/storage-aware-key-design.md b/docs/storage-aware-key-design.md new file mode 100644 index 0000000..976a47c --- /dev/null +++ b/docs/storage-aware-key-design.md @@ -0,0 +1,184 @@ +# Key-Aware Shared Storage Design + +## Overview +This document proposes a storage architecture that keeps the shared informer/watch +fanout model while removing the need to persist a user-visible cluster label on +objects for list/watch correctness. + +The core idea is to carry cluster identity as internal storage metadata derived +from keyspace semantics, not from object metadata. + +## Problem Statement +Current multicluster storage uses one shared store/watchcache per resource kind, +which is the right scalability direction. However, cache keying currently depends +on a server-owned label (for example `multicluster.k8s.io/cluster`) persisted on +objects so cluster identity can be recovered from object-only key functions. + +That creates two issues: +- **API shape drift**: clients may observe server-owned placement labels. +- **Coupling**: cluster identity is coupled to object metadata rather than storage + metadata, making future layout changes harder. 
+ +## Goals +- Preserve one shared watcher/cache per resource kind. +- Preserve strict per-cluster list/watch/read/write isolation. +- Eliminate dependency on object labels for watchcache/index keying. +- Keep cluster identity internal-only and non-serialized to clients. +- Allow future etcd key layout evolution without touching API object schema. + +## Non-Goals +- Replacing Kubernetes generic registry behavior wholesale. +- Introducing per-cluster watchcaches/stores again. +- Changing external Kubernetes API semantics for selectors/versioning/watch. + +## Design Summary +Introduce a key-aware internal envelope used inside storage/cache fanout: +- `Object runtime.Object` (decoded Kubernetes object) +- `ClusterID string` (internal placement identity) +- `StorageKey string` (canonical key) +- optional `LayoutVersion string` + +All fanout/index decisions use `ClusterID` from the envelope. Only `Object` is +serialized in API responses. + +### Placement Resolver +Add a small abstraction that resolves cluster identity from keyspace metadata: +- `ClusterFromStorageKey(key string) (clusterID string, ok bool)` +- versioned implementations for key layout evolution. + +This centralizes parsing and decouples routing logic from hard-coded path parsing +scattered across caches. + +## Architecture +1. Request routing still establishes cluster scope in context. +2. Storage key rewrite still places data under cluster-specific subpaths. +3. Shared low-level watch/list reads from kind-root prefix. +4. Internal cache transforms events into envelope entries. +5. Envelope `ClusterID` drives per-request filter/fanout. +6. API response encoding emits only wrapped object payload. 
+ +## New and Modified Files / Methods + +### New Files (proposed) +- `pkg/multicluster/storage/placement_resolver.go` + - `type PlacementResolver interface` + - `type KeyLayoutPlacementResolver struct` + - `func (r *KeyLayoutPlacementResolver) ClusterFromStorageKey(...)` + +- `pkg/multicluster/storage/entry.go` + - `type InternalEntry struct { Object, ClusterID, StorageKey, LayoutVersion }` + - helper constructors and cloning utilities. + +- `pkg/multicluster/storage/keyaware_cache.go` + - shared cache wrapper that consumes watch events and stores `InternalEntry`. + - per-request filtering by cluster. + +- `pkg/multicluster/storage/keyaware_watch.go` + - wraps watch streams and emits only runtime objects while using entry metadata + for internal dispatch. + +- `pkg/multicluster/storage/keyaware_index.go` + - index helpers keyed by `(clusterID, namespace, name)` from entry metadata. + +- `pkg/multicluster/storage/keyaware_cache_test.go` +- `pkg/multicluster/storage/placement_resolver_test.go` + +### Existing Files to Modify +- `pkg/multicluster/storage.go` + - keep context-based key rewrite and shared kind-root behavior. + - replace object-label-based key derivation path with key-aware cache adapter. + - remove cluster-label enforcement paths once migration completes. + +- `pkg/multicluster/options.go` + - add options for placement resolver selection and layout version. + +- `cmd/apiserver/app/config.go` + - wire new decorator/options into server config path. + +- `pkg/multicluster/storage_test.go` + - adapt tests to assert isolation without relying on persisted cluster label. + +- `test/smoke/*.go` (targeted) + - add/adjust smoke tests for list/watch isolation with no visible cluster label. + +## Wiring Plan + +### 1) Decorator Wiring +Continue using `RESTOptionsDecorator` entrypoint, but swap internals: +- current: wraps storage and relies on object metadata for cache keying. +- proposed: wraps storage with key-aware cache/entry adapter. 
+
+Wiring location:
+- `cmd/apiserver/app/config.go`
+  - existing `decorateRESTOptionsGetter(...)` integration remains the anchor.
+
+### 2) Resolver Injection
+Inject `PlacementResolver` via multicluster options:
+- default resolver handles current `/.../clusters/<cluster-id>/...` layout.
+- optional future resolver supports new key layouts.
+
+## Upstream Reuse vs Custom Implementation
+
+### Reuse from Upstream
+- Generic registry strategy flow (`Create/Get/List/Update/Delete/Watch` contract).
+- Etcd storage backend and encoding pipeline.
+- Selection predicates and API machinery object conversion.
+- Existing apiserver handler chain, authn/authz/admission plumbing.
+
+### Wrap / Extend Locally
+- Multicluster RESTOptions decoration and key rewrite.
+- Placement resolver and internal entry envelope.
+- Shared cache dispatch keyed by cluster metadata.
+
+### Likely Not Reusable As-Is
+- Upstream cacher assumptions where keying/indexing is object-only.
+  - We should avoid deep invasive upstream patching in-tree.
+  - Prefer local key-aware adapter/fork boundary with minimal surface area.
+
+## Migration Plan
+
+### Phase 0: Design and Scaffolding
+- Add resolver and entry types.
+- No behavior change.
+
+### Phase 1: Immediate Cutover
+- Implement key-aware cache path and switch read/watch dispatch directly during implementation.
+- Keep label writes temporarily for rollback safety.
+
+### Phase 2: Remove Label Dependency
+- Stop requiring label for keying/indexing.
+- Remove cluster-label mutating/validation requirements where safe.
+- Add response-shape tests ensuring no server-owned cluster label leakage.
+
+### Phase 3: Cleanup
+- Delete legacy code paths.
+- Update docs and operational runbooks.
+
+## Validation and Test Plan
+- Unit:
+  - resolver correctness for current and alternate layouts.
+  - key-aware cache fanout correctness and concurrency behavior.
+  - watch event ordering and bookmark behavior.
+- Integration:
+  - existing smoke suite must pass.
+ - dedicated tests for list/watch isolation with absent cluster label. + - CRD/list/watch behavior parity tests. +- Performance: + - compare goroutine count, memory, and watch fanout metrics vs current model. + +## Risks and Mitigations +- **Risk**: subtle watch semantics regressions. + - Mitigation: broaden integration/smoke coverage and validate watch semantics in targeted tests before merge. +- **Risk**: cache key collisions across clusters. + - Mitigation: explicit `(clusterID, namespace, name)` composite indexes. +- **Risk**: rollout complexity. + - Mitigation: feature gate + phased migration + smoke regression gates. + +## Open Questions +- Should we expose layout version in metrics for easier migrations? +- Where should fork boundary live to minimize divergence from upstream changes? + +## Decision +Proceed with key-aware internal entry and placement resolver design, preserving +shared informer/watch fanout while removing object-label dependency for cluster +identity. diff --git a/go.mod b/go.mod index 2ec4e55..1904141 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,14 @@ module github.com/kplane-dev/apiserver -go 1.24.6 +go 1.25.0 require ( + github.com/kplane-dev/informer v0.0.0-00010101000000-000000000000 + github.com/kplane-dev/storage v0.0.0 github.com/spf13/cobra v1.10.1 - go.opentelemetry.io/otel v1.38.0 - golang.org/x/sync v0.17.0 + go.etcd.io/etcd/client/v3 v3.6.7 + go.opentelemetry.io/otel v1.40.0 + golang.org/x/sync v0.19.0 gopkg.in/evanphx/json-patch.v4 v4.13.0 k8s.io/api v0.34.1 k8s.io/apiextensions-apiserver v0.34.1 @@ -16,42 +19,49 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kube-aggregator v0.34.1 k8s.io/kubernetes v1.34.1 - k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d + k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 ) replace ( - k8s.io/api => k8s.io/api v0.34.1 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.1 - k8s.io/apimachinery => k8s.io/apimachinery v0.34.1 - k8s.io/apiserver => k8s.io/apiserver v0.34.1 - 
k8s.io/cli-runtime => k8s.io/cli-runtime v0.34.1 - k8s.io/client-go => k8s.io/client-go v0.34.1 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.34.1 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.34.1 - k8s.io/code-generator => k8s.io/code-generator v0.34.1 - k8s.io/component-base => k8s.io/component-base v0.34.1 - k8s.io/component-helpers => k8s.io/component-helpers v0.34.1 - k8s.io/controller-manager => k8s.io/controller-manager v0.34.1 - k8s.io/cri-api => k8s.io/cri-api v0.34.1 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.34.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.34.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.34.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.34.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.1 - k8s.io/kubectl => k8s.io/kubectl v0.34.1 - k8s.io/kubelet => k8s.io/kubelet v0.34.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.34.1 - k8s.io/metrics => k8s.io/metrics v0.34.1 - k8s.io/mount-utils => k8s.io/mount-utils v0.34.1 - k8s.io/node-api => k8s.io/node-api v0.34.1 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.34.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.34.1 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.34.1 - k8s.io/sample-controller => k8s.io/sample-controller v0.34.1 + github.com/kplane-dev/informer => github.com/kplane-dev/informer v0.0.0-20260303050920-e9c86850386e + github.com/kplane-dev/storage => github.com/kplane-dev/storage v0.0.0-20260303050750-8ad94e8ce404 + k8s.io/api => github.com/kplane-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/apiextensions-apiserver => github.com/kplane-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/apimachinery => github.com/kplane-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/apiserver => 
github.com/kplane-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/cli-runtime => github.com/kplane-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/client-go => github.com/kplane-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/cloud-provider => github.com/kplane-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/cluster-bootstrap => github.com/kplane-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/code-generator => github.com/kplane-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/component-base => github.com/kplane-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/component-helpers => github.com/kplane-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/controller-manager => github.com/kplane-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/cri-api => github.com/kplane-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/cri-client => github.com/kplane-dev/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/csi-translation-lib => github.com/kplane-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/dynamic-resource-allocation => github.com/kplane-dev/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/endpointslice => github.com/kplane-dev/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/externaljwt => github.com/kplane-dev/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kms => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kms 
v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kube-aggregator => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kube-controller-manager => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kube-proxy => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kube-scheduler => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kubectl => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kubelet => github.com/kplane-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/kubernetes => github.com/kplane-dev/kubernetes v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/metrics => github.com/kplane-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/mount-utils => github.com/kplane-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/pod-security-admission => github.com/kplane-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/sample-apiserver => github.com/kplane-dev/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/sample-cli-plugin => github.com/kplane-dev/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20260303044756-e9e2a52adaf0 + k8s.io/sample-controller => github.com/kplane-dev/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20260303044756-e9e2a52adaf0 ) require ( cel.dev/expr v0.24.0 // indirect + cyphar.com/go-pathrs v0.2.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect @@ -59,10 +69,11 @@ require ( github.com/blang/semver/v4 v4.0.0 // 
indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/coreos/go-oidc v2.4.0+incompatible // indirect + github.com/coreos/go-oidc v2.5.0+incompatible // indirect github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.6.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/coreos/go-systemd/v22 v22.7.0 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -87,14 +98,13 @@ require ( github.com/go-openapi/swag/yamlutils v0.24.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.26.1 // indirect github.com/google/gnostic-models v0.7.0 // indirect - github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -108,46 +118,45 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/selinux v1.11.1 // 
indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/opencontainers/selinux v1.13.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.2.0 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/stoewer/go-strcase v1.3.1 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/api/v3 v3.6.5 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.6.5 // indirect - go.etcd.io/etcd/client/v3 v3.6.5 // indirect + go.etcd.io/etcd/api/v3 v3.6.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.7 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.opentelemetry.io/proto/otlp v1.8.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + 
go.opentelemetry.io/otel/sdk v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect + go.uber.org/zap v1.27.1 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.42.0 // indirect - golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect - golang.org/x/net v0.44.0 // indirect - golang.org/x/oauth2 v0.31.0 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/term v0.35.0 // indirect - golang.org/x/text v0.29.0 // indirect - golang.org/x/time v0.13.0 // indirect - golang.org/x/tools v0.37.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250922171735-9219d122eba9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 // indirect - google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.9 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.40.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/grpc v1.78.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect @@ -162,13 +171,18 @@ require ( k8s.io/endpointslice v0.34.1 // indirect k8s.io/externaljwt v0.34.1 // indirect k8s.io/kms v0.34.1 // indirect - k8s.io/kube-openapi 
v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/kube-controller-manager v0.0.0 // indirect + k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 // indirect + k8s.io/kube-proxy v0.0.0 // indirect + k8s.io/kube-scheduler v0.0.0 // indirect + k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.34.1 // indirect + k8s.io/metrics v0.0.0 // indirect k8s.io/mount-utils v0.34.1 // indirect k8s.io/pod-security-admission v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 6e6ec58..36b41cb 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,11 @@ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cyphar.com/go-pathrs v0.2.2 h1:y9w7hxbkr3zEL78Fjzeg4HEhs2xNy+fbwHiHGJJY2Xo= +cyphar.com/go-pathrs v0.2.2/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= @@ -18,18 +22,21 @@ github.com/cenkalti/backoff/v5 
v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/coreos/go-oidc v2.4.0+incompatible h1:xjdlhLWXcINyUJgLQ9I76g7osgC2goiL6JDXS6Fegjk= -github.com/coreos/go-oidc v2.4.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.5.0+incompatible h1:6W0vGJR3Tu0r0PwfmjOrRZSlfxeEln8dsejt3ZWIvwo= +github.com/coreos/go-oidc v2.5.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= -github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= +github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -81,8 +88,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= -github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -94,20 +101,18 @@ github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7O github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod 
h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA= -github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 h1:QGLs/O40yoNK9vmy4rhUGBVyMf1lISBGtXRpsu/Qu/o= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod 
h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= @@ -120,6 +125,64 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kplane-dev/informer v0.0.0-20260303050920-e9c86850386e h1:alhKoCGQhvNyKi+8h0/m3N46/mfxmvzUAK4gFabPsJk= +github.com/kplane-dev/informer v0.0.0-20260303050920-e9c86850386e/go.mod h1:Nd1KQEeObbfGhP3NZsLNrOtNVDl6h4L3l4R52FQrJYI= +github.com/kplane-dev/kubernetes v0.0.0-20260303044756-e9e2a52adaf0 h1:6s12hLrc1qmwLFEhI5OofZ7J07sw8q7pg552jgNlpTU= +github.com/kplane-dev/kubernetes v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:9uEdeMBkUjkGSL2ioy2pKr0+aLNIvmOan7p8F7ENHbk= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20260303044756-e9e2a52adaf0 h1:sQjyTi6O+gAnxvjltaNQk77Q+0gCPopYDH9P1YZkN/k= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:KOrdwhDi3QHVXr50HAWBhO2r9uMtWbO46CiIHzRVtU8= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20260303044756-e9e2a52adaf0 h1:SC2NAxlp1NfqJo6lfPgGWYDdqlT3A39UpmvlOlznvKM= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:wZyuUfbqWsRKO42GvZVHZSJ0JttMO7YrXnu/bv/GqPg= 
+github.com/kplane-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20260303044756-e9e2a52adaf0 h1:2TJwa1qB8fIP5+SxnZydicT9uxKFnVx92afDO8O1tVE= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:7mgr/dli8ofwAbcIQXetFVX1fbOYsOYojq3AUbybVmQ= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20260303044756-e9e2a52adaf0 h1:ylj6Fc6O3iXH6pLyfYEFhXjNjgEm9v3mVt8jWsdZeYY= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:GNWcUSRqjpm4i1hrLaGA7EQrl60YdahMic4aS+WUQVI= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20260303044756-e9e2a52adaf0 h1:RM/DJi1rJcAb7RQI+Zt5NxHJNk2cAM+KDVl9Xl9lxIU= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:7IM9p4c8CafSxF7ZY0F46WHylFn3o4mLVW5T1VZbaY8= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20260303044756-e9e2a52adaf0 h1:2PkgzWXTPKwkcuzEcxsMTUf8llfcOl5rzR3r1UbSLVw= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:bGWrOCyYWe9G/1zpB0hSLAO+BnZUNbNVWXcHAzfaF+E= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20260303044756-e9e2a52adaf0 h1:dt9wd4bwSNXxi8Ma/PdECxhVAotI8LVUusSPMzSf740= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:CxFNOU1/U4gfM4SU7hRkzCRhTfUweuTLRWKWh6yitcE= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20260303044756-e9e2a52adaf0 h1:azL0YSPSlc/id0oPDs4hXeWSiv8HO9vrNhxEl6LdbVY= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:R6vYa1XRfX3PdQEGNkCaL3pt7NvLU2ti7FPzsEsA6GQ= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20260303044756-e9e2a52adaf0 h1:c3DFoxrOUhhzUIyLOjGRvFGABKKPF1aDgq5xYHpaOEI= 
+github.com/kplane-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:axBpPWJYMZFstDUXj45ooTW3njLMa7B8Kz8o4QY5xfk= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20260303044756-e9e2a52adaf0 h1:FUKLrOqS04/6uSpEVD0cLCT9+IRCuQpgEceDpJ9PoNs= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:gjjSpBn/ZS8VRB+BMH7Ttd2kk+Hr+IYvm4LwBwQAtaU= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20260303044756-e9e2a52adaf0 h1:Q3mSnGo3lyBFi83ztys5jItqPuqqGDIi0HPmlWCclb8= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:sez47HjW1useDMcQeVkW7kQNjwzaLJlaJoMcWYU6WVE= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20260303044756-e9e2a52adaf0 h1:PNPWPY/kn7vuwIYFeXCWdc0A06S8Mm08jzmErUisus4= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:rVFUCMkvRUmEXFCLoBSsAbnSarFL/9tYTe6kE+nh/a0= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20260303044756-e9e2a52adaf0 h1:QOHDI3guNsPh6OlJqVMOu/wgwYLlVtMhm4U05oNDa8c= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:h4+rRn/HN1k7Uln9nnxykSWPI5ZBQCBJyHosyW4F7nk= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20260303044756-e9e2a52adaf0 h1:LUI/X6tSzQknb1Sj6NTdwZlqjPcpu1i5Ncyz8SQVlGo= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:2UvGX2oAJmCnqKSEBlBMnCEWisTXl2ZXQK/quJy61Q4= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20260303044756-e9e2a52adaf0 h1:7ow1+bA4PhrMBdYxnOwjHrSRHYZJrIeNXjjmGH3d0C4= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20260303044756-e9e2a52adaf0/go.mod 
h1:fdIUBx+W5Ypy0TBvfW8qonHr4QV3iXw+N+ADcGYf8eI= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20260303044756-e9e2a52adaf0 h1:Q+5yNIJcssUqBYZLqrDOB4SukPAutQDtzYdowPDsfUg= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:+8tzBBizhTus+bM0JPY/4h2O26NxIQgcEBdK5Xdfehc= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20260303044756-e9e2a52adaf0 h1:No3QOZxohl0wJ9y8FIeS+40LQaKYsKi/9JDNSN64u/s= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kms v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:rhO9JHsWibaRLx12/ViuMQdeZi43hsiXCl8KEEihjaw= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20260303044756-e9e2a52adaf0 h1:aa+2RkO/nOKISlkzNxnHsGRSlqYgdN3f0K9hmUDryP0= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:KXJ9MlZYxlzOWKhHqfdxOTDdmfk/+ftdOwN//JGQs0g= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20260303044756-e9e2a52adaf0 h1:M93LWZ6iNDKr5lKTKc727Idty1Yb28cqgSfM9iBS+F0= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:fvSj40FltpPVU4KLda+I/WhFTwwiGuPpzpy+5UeiOaM= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20260303044756-e9e2a52adaf0 h1:3irDg3wS8FC2Ad2etU4zIuvPOrvqZLLn/uAL+zarO7c= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:1nDymEUiZa0nIsVZkeSAFISoJsp0Nnd40vWuipDGLv0= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20260303044756-e9e2a52adaf0 h1:oTGF1xkyClEHgE6tPJRea+nr+qj87xhk7spFpczhCUg= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:5ZwsfUdHnj/nEAAdk8utKGDG6c4eZTyZg/7BHPE4uaQ= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20260303044756-e9e2a52adaf0 
h1:UMxEse6cLj5UVoPrnUvCLXejOy1Q0NQjZ8f22mYuOXI= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:ZmlXPtm3wJFF6x/7QU8q/op7GHIyBvnUjsbkud1NeNQ= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20260303044756-e9e2a52adaf0 h1:zUmxHL3TG3Ou7SN/zU0I5ykFaTOuterfjqAPIJWgb7c= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:h59njVygVWeHIhmMqMoocQNjUyC3q8D51fZNSuMEAis= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20260303044756-e9e2a52adaf0 h1:FrczvZ9dkyIrBdL9dMjeFy4bAe1mdWZOeJklFJUxmpE= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:5vBQ1SE0Ed1m5R6BkCKRXDboEjmKbbO17EyrF6XXiqA= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20260303044756-e9e2a52adaf0 h1:JuZNurkMdNL7PrkddsBKsbJuEtCGaC6lB5fvGsVHwsU= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:0tRkeDDp5zQDR/JKhcttJMwq3mTWqNm5GE2DgjfRvsY= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20260303044756-e9e2a52adaf0 h1:VD7WwvMpgBTi++h1LpLszj9fZXBwMhPtb6jIjlpLeGo= +github.com/kplane-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20260303044756-e9e2a52adaf0/go.mod h1:2Hv3+Ga3SCWHO9E8+Ym58CioDvWe7wW21gy3nVetVNw= +github.com/kplane-dev/storage v0.0.0-20260303050750-8ad94e8ce404 h1:WBSxvdd1pjj2BesniHdRIBklW84P8KQfjvc/RRCvNJg= +github.com/kplane-dev/storage v0.0.0-20260303050750-8ad94e8ce404/go.mod h1:CApTI1DlyUgXF/p8uWvPVQ0pyPoeY7Yt+enSu57gPoo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -144,26 +207,27 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.27.4 h1:fcEcQW/A++6aZAZQNUmNjvA9PSOzefMJBerHJ4t8v8Y= +github.com/onsi/ginkgo/v2 v2.27.4/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.39.0 h1:y2ROC3hKFmQZJNFeGAMeHZKkjBL65mIZcvrLQBF9k6Q= +github.com/onsi/gomega v1.39.0/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= -github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/opencontainers/selinux v1.13.1 h1:A8nNeceYngH9Ow++M+VVEwJVpdFmrlxsN22F+ISDCJE= +github.com/opencontainers/selinux v1.13.1/go.mod h1:S10WXZ/osk2kWOYKy1x2f/eXF5ZHJoUs8UU/2caNRbg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -200,48 +264,48 @@ github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chq github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.4.2 
h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= -go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= -go.etcd.io/etcd/api/v3 v3.6.5 h1:pMMc42276sgR1j1raO/Qv3QI9Af/AuyQUW6CBAWuntA= -go.etcd.io/etcd/api/v3 v3.6.5/go.mod h1:ob0/oWA/UQQlT1BmaEkWQzI0sJ1M0Et0mMpaABxguOQ= -go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3RrBP8= -go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk= -go.etcd.io/etcd/client/v3 v3.6.5 h1:yRwZNFBx/35VKHTcLDeO7XVLbCBFbPi+XV4OC3QJf2U= -go.etcd.io/etcd/client/v3 v3.6.5/go.mod h1:ZqwG/7TAFZ0BJ0jXRPoJjKQJtbFo/9NIY8uoFFKcCyo= -go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA= -go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE= -go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU= -go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg= +go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= +go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= +go.etcd.io/etcd/api/v3 v3.6.7 h1:7BNJ2gQmc3DNM+9cRkv7KkGQDayElg8x3X+tFDYS+E0= +go.etcd.io/etcd/api/v3 v3.6.7/go.mod h1:xJ81TLj9hxrYYEDmXTeKURMeY3qEDN24hqe+q7KhbnI= +go.etcd.io/etcd/client/pkg/v3 v3.6.7 h1:vvzgyozz46q+TyeGBuFzVuI53/yd133CHceNb/AhBVs= +go.etcd.io/etcd/client/pkg/v3 v3.6.7/go.mod h1:2IVulJ3FZ/czIGl9T4lMF1uxzrhRahLqe+hSgy+Kh7Q= +go.etcd.io/etcd/client/v3 v3.6.7 h1:9WqA5RpIBtdMxAy1ukXLAdtg2pAxNqW5NUoO2wQrE6U= +go.etcd.io/etcd/client/v3 v3.6.7/go.mod h1:2XfROY56AXnUqGsvl+6k29wrwsSbEh1lAouQB1vHpeE= +go.etcd.io/etcd/pkg/v3 v3.6.7 h1:qIxdSI+LAmKFAjMy42yHQzSNqG/sWES4QjhFSGsMDpY= +go.etcd.io/etcd/pkg/v3 v3.6.7/go.mod h1:nPbpIExp9Q6tR/EVI2aZe0VBlflLys5VGFWSCmqUOyk= +go.etcd.io/etcd/server/v3 v3.6.7 h1:8dEGQ877tj0cQJFEfD2bDoZDA76qbS2OkvCNjwAyrSo= +go.etcd.io/etcd/server/v3 v3.6.7/go.mod h1:LEM328bPA2uVMhN0+Ht/vAsADW127QS1oM7EuHrOTy0= 
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ= go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod 
h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= -go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 h1:XmiuHzgJt067+a6kwyAzkhXooYVv3/TOw9cM2VfJgUM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0/go.mod h1:KDgtbWKTQs4bM+VPUr6WlL9m/WXcmkCcBlIzqxPGzmI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 
h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= @@ -249,59 +313,61 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= -golang.org/x/exp 
v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= -golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= +golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 h1:fQsdNF2N+/YewlRZiricy4P1iimyPKZ/xwniHj8Q2a0= +golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= -golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= -golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= -golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= -golang.org/x/time v0.13.0 
h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= -golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto/googleapis/api v0.0.0-20250922171735-9219d122eba9 h1:jm6v6kMRpTYKxBRrDkYAitNJegUeO1Mf3Kt80obv0gg= -google.golang.org/genproto/googleapis/api 
v0.0.0-20250922171735-9219d122eba9/go.mod h1:LmwNphe5Afor5V3R5BppOULHOnt2mCIf+NxMd4XiygE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 h1:V1jCN2HBa8sySkR5vLcCSqJSTMv093Rw9EJefhQGP7M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= -google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -316,63 +382,19 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYs gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= -k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= -k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= -k8s.io/cloud-provider v0.34.1 h1:FS+4C1vq9pIngd/5LR5Jha1sEbn+fo0HJitgZmUyBNc= -k8s.io/cloud-provider v0.34.1/go.mod h1:ghyQYfQIWZAXKNS+TEgEiQ8wPuhzIVt3wFO6rKqS/rQ= -k8s.io/cluster-bootstrap v0.34.1 h1:lyCwJKoeYzGI93vk5Sn/Gz2rzfTRXkRuZYOk2rUsHfA= -k8s.io/cluster-bootstrap v0.34.1/go.mod h1:9EJfkp7Fu4YBU0F6ysvrI5TndWLo8zufmDSjIWBNd94= -k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= -k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= -k8s.io/component-helpers v0.34.1 h1:gWhH3CCdwAx5P3oJqZKb4Lg5FYZTWVbdWtOI8n9U4XY= -k8s.io/component-helpers v0.34.1/go.mod h1:4VgnUH7UA/shuBur+OWoQC0xfb69sy/93ss0ybZqm3c= -k8s.io/controller-manager v0.34.1 h1:c9Cmun/zF740kmdRQWPGga+4MglT5SlrwsCXDS/KtJI= -k8s.io/controller-manager v0.34.1/go.mod h1:fGiJDhi3OSzSAB4f40ZkJLAqMQSag9RM+7m5BRhBO3Q= -k8s.io/cri-api v0.34.1 h1:n2bU++FqqJq0CNjP/5pkOs0nIx7aNpb1Xa053TecQkM= -k8s.io/cri-api v0.34.1/go.mod 
h1:4qVUjidMg7/Z9YGZpqIDygbkPWkg3mkS1PvOx/kpHTE= -k8s.io/cri-client v0.34.1 h1:eq6FcEPDDL379w0WhPnItj2egsMZqOtU7nv1JaJmwP0= -k8s.io/cri-client v0.34.1/go.mod h1:Dq6mKWV2ugO5tMv4xqVgcQ8vD7csP//e4KkzcFi2Pio= -k8s.io/csi-translation-lib v0.34.1 h1:8+QMIWBwPGFsqWw9eAvimA2GaHXGgLLYT61I1NzDnXw= -k8s.io/csi-translation-lib v0.34.1/go.mod h1:QXytPJ1KzYQaiMgVm82ANG+RGAUf276m8l9gFT+R6Xg= -k8s.io/dynamic-resource-allocation v0.34.1 h1:pd9qhOeAFkn8eOO4BthAiGHQc8pu+N6TK/2Fj+jaPwU= -k8s.io/dynamic-resource-allocation v0.34.1/go.mod h1:Zlpqyh6EKhTVoQDe5BS31/8oMXGfG6c12ydj3ChXyuw= -k8s.io/endpointslice v0.34.1 h1:+bxZVXN+6NUCyur42p6UkqBmSvXw6FChrwvvnOSbeho= -k8s.io/endpointslice v0.34.1/go.mod h1:70gpj+tfjoAXm3rPQhAdPAMywVtSPnOTSaau/hTeAYg= -k8s.io/externaljwt v0.34.1 h1:IPu6qFFAazvTvrl/dd72KsQv2X0d95nLQaqizJAeo68= -k8s.io/externaljwt v0.34.1/go.mod h1:LIqFAVwSkcWVlP3c78wxe2VGmgDySxfqX/wwXzVrV/Q= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.34.1 h1:iCFOvewDPzWM9fMTfyIPO+4MeuZ0tcZbugxLNSHFG4w= -k8s.io/kms v0.34.1/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM= -k8s.io/kube-aggregator v0.34.1 h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU= -k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= -k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/kubelet v0.34.1 h1:doAaTA9/Yfzbdq/u/LveZeONp96CwX9giW6b+oHn4m4= -k8s.io/kubelet v0.34.1/go.mod h1:PtV3Ese8iOM19gSooFoQT9iyRisbmJdAPuDImuccbbA= -k8s.io/kubernetes v1.34.1 h1:F3p8dtpv+i8zQoebZeK5zBqM1g9x1aIdnA5vthvcuUk= -k8s.io/kubernetes v1.34.1/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto= -k8s.io/mount-utils v0.34.1 h1:zMBEFav8Rxwm54S8srzy5FxAc4KQ3X4ZcjnqTCzHmZk= -k8s.io/mount-utils v0.34.1/go.mod 
h1:MIjjYlqJ0ziYQg0MO09kc9S96GIcMkhF/ay9MncF0GA= -k8s.io/pod-security-admission v0.34.1 h1:XsP5eh8qCj69hK0a5TBMU4Ed7Ckn8JEmmbk/iepj+XM= -k8s.io/pod-security-admission v0.34.1/go.mod h1:87yY36Gxc8Hjx24FxqAD5zMY4k0tP0u7Mu/XuwXEbmg= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY= +k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2Fol2CS0QHMNs/WI1MOSGzCm1KhM5ec= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8= 
+sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/pkg/multicluster/admission/mutating.go b/pkg/multicluster/admission/mutating.go index e90a26b..65747ed 100644 --- a/pkg/multicluster/admission/mutating.go +++ b/pkg/multicluster/admission/mutating.go @@ -6,7 +6,6 @@ import ( mcv1 "github.com/kplane-dev/apiserver/pkg/multicluster" mcauth "github.com/kplane-dev/apiserver/pkg/multicluster/auth" - "k8s.io/apimachinery/pkg/api/meta" apiserveradmission "k8s.io/apiserver/pkg/admission" authenticationapi "k8s.io/kubernetes/pkg/apis/authentication" ) @@ -45,27 +44,5 @@ func (m *Mutating) Admit(ctx context.Context, a apiserveradmission.Attributes, _ if gvk.Group == "authorization.k8s.io" || gvk.Group == "authentication.k8s.io" || strings.HasSuffix(gvk.Kind, "Review") { return nil } - obj := a.GetObject() - if obj == nil { - return nil - } - cid, _, _ := mcv1.FromContext(ctx) - if cid == "" { - cid = m.Options.DefaultCluster - } - accessor, err := meta.Accessor(obj) - if err != nil { - return nil - } - lbls := accessor.GetLabels() - if lbls == nil { - lbls = map[string]string{} - } - key := m.Options.ClusterAnnotationKey - if key == "" { - key = mcv1.DefaultClusterAnnotation - } - lbls[key] = cid - accessor.SetLabels(lbls) return nil } diff --git a/pkg/multicluster/admission/namespace/manager.go b/pkg/multicluster/admission/namespace/manager.go index 3717920..a93252e 100644 --- a/pkg/multicluster/admission/namespace/manager.go +++ b/pkg/multicluster/admission/namespace/manager.go @@ -3,12 +3,13 @@ package namespace import ( "sync" - "github.com/kplane-dev/apiserver/pkg/multicluster/scopedinformer" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" mc 
"github.com/kplane-dev/apiserver/pkg/multicluster" + "github.com/kplane-dev/apiserver/pkg/multicluster/typedinformer" ) type Options struct { @@ -23,8 +24,8 @@ type Options struct { // ClientPool caches per-cluster loopback clients. ClientPool *mc.ClientPool - // InformerPool shares informer factories across managers per cluster. - InformerPool *mc.InformerPool + // InformerRegistry provides MultiClusterInformers for resource types. + InformerRegistry *mc.InformerRegistry } type Manager struct { @@ -32,17 +33,10 @@ type Manager struct { mu sync.Mutex clusters map[string]*clusterEnv - - sharedOnce sync.Once - sharedErr error - shared informers.SharedInformerFactory - sharedStop <-chan struct{} - sharedOwn chan struct{} } type clusterEnv struct { - stopCh chan struct{} - cid string + cid string clientset kubernetes.Interface informers informers.SharedInformerFactory @@ -73,75 +67,30 @@ func (m *Manager) envForCluster(clusterID string) (*clusterEnv, error) { if err != nil { return nil, err } - scoped, err := m.scopedNamespaceFactory(clusterID) + + nsMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "namespaces"}) if err != nil { return nil, err } - stopCh := make(chan struct{}) + + factory := typedinformer.NewMCIFactory(typedinformer.MCIFactoryConfig{ + ClusterID: clusterID, + Namespaces: nsMCI, + }) e := &clusterEnv{ cid: clusterID, - stopCh: stopCh, clientset: cs, - informers: scoped, + informers: factory, } - // Warm the namespaces informer (used by NamespaceLifecycle). 
- _ = scoped.Core().V1().Namespaces().Informer() - scoped.Start(stopCh) m.clusters[clusterID] = e return e, nil } -func (m *Manager) scopedNamespaceFactory(clusterID string) (informers.SharedInformerFactory, error) { - shared, err := m.ensureSharedFactory() - if err != nil { - return nil, err - } - return newScopedFactory(clusterID, mc.DefaultClusterAnnotation, shared), nil -} - -func (m *Manager) ensureSharedFactory() (informers.SharedInformerFactory, error) { - m.sharedOnce.Do(func() { - if m.opts.BaseLoopbackClientConfig == nil { - m.sharedErr = mc.ErrMissingClientFactory - return - } - cs, err := scopedinformer.NewAllClustersKubeClient(m.opts.BaseLoopbackClientConfig) - if err != nil { - m.sharedErr = err - return - } - factory := informers.NewSharedInformerFactory(cs, 0) - if err := factory.Core().V1().Namespaces().Informer().SetTransform(transformNamespaceForShared(mc.DefaultClusterAnnotation)); err != nil { - m.sharedErr = err - return - } - if err := scopedinformer.EnsureClusterIndex(factory.Core().V1().Namespaces().Informer(), mc.DefaultClusterAnnotation); err != nil { - m.sharedErr = err - return - } - if m.sharedStop == nil { - m.sharedOwn = make(chan struct{}) - m.sharedStop = m.sharedOwn - } - factory.Start(m.sharedStop) - m.shared = factory - }) - if m.sharedErr != nil { - return nil, m.sharedErr - } - return m.shared, nil -} - // StopCluster is test-oriented cleanup; production can leave informers running. 
func (m *Manager) StopCluster(clusterID string) { m.mu.Lock() defer m.mu.Unlock() - if e, ok := m.clusters[clusterID]; ok { - if e.stopCh != nil { - close(e.stopCh) - } - delete(m.clusters, clusterID) - } + delete(m.clusters, clusterID) } diff --git a/pkg/multicluster/admission/namespace/plugins.go b/pkg/multicluster/admission/namespace/plugins.go index 01130ef..b2955c7 100644 --- a/pkg/multicluster/admission/namespace/plugins.go +++ b/pkg/multicluster/admission/namespace/plugins.go @@ -63,7 +63,7 @@ func (p *LifecyclePlugin) forCluster(ctx context.Context) *upstream.Lifecycle { panic(err) } - plugin, err := upstream.NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic)) + plugin, err := upstream.NewLifecycle(sets.New[string](metav1.NamespaceDefault, metav1.NamespaceSystem, metav1.NamespacePublic)) if err != nil { panic(err) } diff --git a/pkg/multicluster/admission/namespace/scoped_factory.go b/pkg/multicluster/admission/namespace/scoped_factory.go deleted file mode 100644 index 1a615b4..0000000 --- a/pkg/multicluster/admission/namespace/scoped_factory.go +++ /dev/null @@ -1,206 +0,0 @@ -package namespace - -import ( - "fmt" - "reflect" - "strings" - - mc "github.com/kplane-dev/apiserver/pkg/multicluster" - "github.com/kplane-dev/apiserver/pkg/multicluster/scopedinformer" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/informers" - coreinformers "k8s.io/client-go/informers/core" - coreinformersv1 "k8s.io/client-go/informers/core/v1" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" -) - -const sharedNamespaceNamePrefix = "__mcns__" - -type scopedFactory struct { - informers.SharedInformerFactory - clusterID string - clusterLabelKey string - shared informers.SharedInformerFactory -} - -func newScopedFactory(clusterID, clusterLabelKey string, shared informers.SharedInformerFactory) informers.SharedInformerFactory { - if clusterLabelKey == "" { - 
clusterLabelKey = mc.DefaultClusterAnnotation - } - return &scopedFactory{ - SharedInformerFactory: shared, - clusterID: clusterID, - clusterLabelKey: clusterLabelKey, - shared: shared, - } -} - -func (f *scopedFactory) Core() coreinformers.Interface { return &scopedCoreGroup{f: f} } - -func (f *scopedFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - out := f.SharedInformerFactory.WaitForCacheSync(stopCh) - if out == nil { - out = map[reflect.Type]bool{} - } - out[reflect.TypeOf(&corev1.Namespace{})] = f.shared.Core().V1().Namespaces().Informer().HasSynced() - return out -} - -type scopedCoreGroup struct{ f *scopedFactory } - -func (g *scopedCoreGroup) V1() coreinformersv1.Interface { return &scopedCoreV1{f: g.f} } - -type scopedCoreV1 struct{ f *scopedFactory } - -func (v *scopedCoreV1) ComponentStatuses() coreinformersv1.ComponentStatusInformer { - return v.f.SharedInformerFactory.Core().V1().ComponentStatuses() -} -func (v *scopedCoreV1) ConfigMaps() coreinformersv1.ConfigMapInformer { - return v.f.SharedInformerFactory.Core().V1().ConfigMaps() -} -func (v *scopedCoreV1) Endpoints() coreinformersv1.EndpointsInformer { - return v.f.SharedInformerFactory.Core().V1().Endpoints() -} -func (v *scopedCoreV1) Events() coreinformersv1.EventInformer { - return v.f.SharedInformerFactory.Core().V1().Events() -} -func (v *scopedCoreV1) LimitRanges() coreinformersv1.LimitRangeInformer { - return v.f.SharedInformerFactory.Core().V1().LimitRanges() -} -func (v *scopedCoreV1) Namespaces() coreinformersv1.NamespaceInformer { - base := v.f.shared.Core().V1().Namespaces().Informer() - return &scopedNamespaceInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedNamespaceLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID}, - } -} -func (v *scopedCoreV1) Nodes() coreinformersv1.NodeInformer { - return v.f.SharedInformerFactory.Core().V1().Nodes() -} -func (v *scopedCoreV1) 
PersistentVolumes() coreinformersv1.PersistentVolumeInformer { - return v.f.SharedInformerFactory.Core().V1().PersistentVolumes() -} -func (v *scopedCoreV1) PersistentVolumeClaims() coreinformersv1.PersistentVolumeClaimInformer { - return v.f.SharedInformerFactory.Core().V1().PersistentVolumeClaims() -} -func (v *scopedCoreV1) Pods() coreinformersv1.PodInformer { - return v.f.SharedInformerFactory.Core().V1().Pods() -} -func (v *scopedCoreV1) PodTemplates() coreinformersv1.PodTemplateInformer { - return v.f.SharedInformerFactory.Core().V1().PodTemplates() -} -func (v *scopedCoreV1) ReplicationControllers() coreinformersv1.ReplicationControllerInformer { - return v.f.SharedInformerFactory.Core().V1().ReplicationControllers() -} -func (v *scopedCoreV1) ResourceQuotas() coreinformersv1.ResourceQuotaInformer { - return v.f.SharedInformerFactory.Core().V1().ResourceQuotas() -} -func (v *scopedCoreV1) Secrets() coreinformersv1.SecretInformer { - return v.f.SharedInformerFactory.Core().V1().Secrets() -} -func (v *scopedCoreV1) Services() coreinformersv1.ServiceInformer { - return v.f.SharedInformerFactory.Core().V1().Services() -} -func (v *scopedCoreV1) ServiceAccounts() coreinformersv1.ServiceAccountInformer { - return v.f.SharedInformerFactory.Core().V1().ServiceAccounts() -} - -func newFilteredSharedIndexInformer(shared cache.SharedIndexInformer, clusterID, clusterLabelKey string) cache.SharedIndexInformer { - return scopedinformer.NewFilteredSharedIndexInformer(shared, clusterID, clusterLabelKey) -} - -func objectCluster(obj interface{}, clusterLabelKey string) string { - return scopedinformer.ObjectCluster(obj, clusterLabelKey) -} - -func filteredByCluster(indexer cache.Indexer, clusterID string) []interface{} { - return scopedinformer.FilteredByCluster(indexer, clusterID) -} - -func transformNamespaceForShared(clusterLabelKey string) cache.TransformFunc { - return func(obj interface{}) (interface{}, error) { - cid := objectCluster(obj, clusterLabelKey) - if cid == 
"" { - return obj, nil - } - ns, ok := obj.(*corev1.Namespace) - if !ok { - return obj, nil - } - cp := ns.DeepCopy() - cp.Name = encodeSharedNamespaceName(cid, cp.Name) - return cp, nil - } -} - -func encodeSharedNamespaceName(clusterID, name string) string { - if clusterID == "" || name == "" { - return name - } - prefix := sharedNamespaceNamePrefix + clusterID + "__" - if strings.HasPrefix(name, prefix) { - return name - } - return prefix + name -} - -func decodeSharedNamespaceName(clusterID, name string) (string, bool) { - prefix := sharedNamespaceNamePrefix + clusterID + "__" - if strings.HasPrefix(name, prefix) { - return strings.TrimPrefix(name, prefix), true - } - return name, false -} - -type scopedNamespaceInformer struct { - informer cache.SharedIndexInformer - lister corelisters.NamespaceLister -} - -func (i *scopedNamespaceInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedNamespaceInformer) Lister() corelisters.NamespaceLister { return i.lister } - -type scopedNamespaceLister struct { - indexer cache.Indexer - clusterID string -} - -func (l *scopedNamespaceLister) List(sel labels.Selector) (ret []*corev1.Namespace, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Namespace) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - cp := obj.DeepCopy() - cp.Name, _ = decodeSharedNamespaceName(l.clusterID, cp.Name) - ret = append(ret, cp) - } - } - return ret, nil -} - -func (l *scopedNamespaceLister) Get(name string) (*corev1.Namespace, error) { - encoded := encodeSharedNamespaceName(l.clusterID, name) - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Namespace) - if !ok { - continue - } - if obj.Name == name || obj.Name == encoded { - cp := obj.DeepCopy() - cp.Name, _ = decodeSharedNamespaceName(l.clusterID, cp.Name) - return cp, nil - } - } - return nil, 
fmt.Errorf("namespace %q not found", name) -} - -var _ corelisters.NamespaceLister = (*scopedNamespaceLister)(nil) -var _ coreinformersv1.NamespaceInformer = (*scopedNamespaceInformer)(nil) -var _ informers.SharedInformerFactory = (*scopedFactory)(nil) diff --git a/pkg/multicluster/admission/validating.go b/pkg/multicluster/admission/validating.go index 5a01a91..88264cf 100644 --- a/pkg/multicluster/admission/validating.go +++ b/pkg/multicluster/admission/validating.go @@ -2,11 +2,9 @@ package admission import ( "context" - "fmt" "strings" mcv1 "github.com/kplane-dev/apiserver/pkg/multicluster" - "k8s.io/apimachinery/pkg/api/meta" apiserveradmission "k8s.io/apiserver/pkg/admission" ) @@ -32,46 +30,5 @@ func (v *Validating) Validate(ctx context.Context, a apiserveradmission.Attribut if gvk.Group == "authorization.k8s.io" || gvk.Group == "authentication.k8s.io" || strings.HasSuffix(gvk.Kind, "Review") { return nil } - key := v.Options.ClusterAnnotationKey - if key == "" { - key = mcv1.DefaultClusterAnnotation - } - reqCID, _, _ := mcv1.FromContext(ctx) - if reqCID == "" { - reqCID = v.Options.DefaultCluster - } - - if a.GetOperation() == apiserveradmission.Create { - obj := a.GetObject() - if obj == nil { - return nil - } - acc, err := meta.Accessor(obj) - if err != nil { - return nil - } - if cid := acc.GetLabels()[key]; cid != reqCID { - return fmt.Errorf("cluster label %q=%q must match request cluster %q", key, cid, reqCID) - } - return nil - } - - if a.GetOperation() == apiserveradmission.Update { - newObj := a.GetObject() - oldObj := a.GetOldObject() - if newObj == nil || oldObj == nil { - return nil - } - newAcc, err1 := meta.Accessor(newObj) - oldAcc, err2 := meta.Accessor(oldObj) - if err1 != nil || err2 != nil { - return nil - } - oldCID := oldAcc.GetLabels()[key] - newCID := newAcc.GetLabels()[key] - if (oldCID != "" && oldCID != reqCID) || (newCID != "" && newCID != oldCID) { - return fmt.Errorf("cross-cluster updates are forbidden (old=%q new=%q 
request=%q)", oldCID, newCID, reqCID) - } - } return nil } diff --git a/pkg/multicluster/admission/webhook/celruntime.go b/pkg/multicluster/admission/webhook/celruntime.go index 0987ebe..eadc174 100644 --- a/pkg/multicluster/admission/webhook/celruntime.go +++ b/pkg/multicluster/admission/webhook/celruntime.go @@ -6,8 +6,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission/plugin/cel" "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) @@ -113,7 +111,7 @@ func (r *CelRuntime) CacheSize() int { func (r *CelRuntime) baseEnvSet() (*environment.EnvSet, error) { r.baseOnce.Do(func() { - r.base = environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks)) + r.base = environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()) }) return r.base, r.baseErr } diff --git a/pkg/multicluster/admission/webhook/generic/versioned_attributes.go b/pkg/multicluster/admission/webhook/generic/versioned_attributes.go new file mode 100644 index 0000000..bfd9534 --- /dev/null +++ b/pkg/multicluster/admission/webhook/generic/versioned_attributes.go @@ -0,0 +1,33 @@ +package generic + +import ( + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" +) + +// EnsureVersionedAttributesUserInfo guarantees that AdmissionReview construction +// always sees a non-nil user.Info, preventing nil dereferences in upstream +// request builders when attributes are missing user context. 
+func EnsureVersionedAttributesUserInfo(attr *admission.VersionedAttributes) *admission.VersionedAttributes { + if attr == nil || attr.Attributes == nil || attr.Attributes.GetUserInfo() != nil { + return attr + } + + cloned := *attr + cloned.Attributes = userInfoFallbackAttributes{Attributes: attr.Attributes} + return &cloned +} + +type userInfoFallbackAttributes struct { + admission.Attributes +} + +func (a userInfoFallbackAttributes) GetUserInfo() user.Info { + if a.Attributes == nil { + return &user.DefaultInfo{} + } + if info := a.Attributes.GetUserInfo(); info != nil { + return info + } + return &user.DefaultInfo{} +} diff --git a/pkg/multicluster/admission/webhook/generic/webhook.go b/pkg/multicluster/admission/webhook/generic/webhook.go index f968ad3..013b0f1 100644 --- a/pkg/multicluster/admission/webhook/generic/webhook.go +++ b/pkg/multicluster/admission/webhook/generic/webhook.go @@ -22,8 +22,6 @@ import ( "io" "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" admissionv1 "k8s.io/api/admission/v1" @@ -102,7 +100,7 @@ func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory namespaceMatcher: &namespace.Matcher{}, objectMatcher: &object.Matcher{}, dispatcher: dispatcherFactory(&cm), - filterCompiler: cel.NewConditionCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks))), + filterCompiler: cel.NewConditionCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())), }, nil } diff --git a/pkg/multicluster/admission/webhook/manager.go b/pkg/multicluster/admission/webhook/manager.go index d8ae25c..8be1b61 100644 --- a/pkg/multicluster/admission/webhook/manager.go +++ b/pkg/multicluster/admission/webhook/manager.go @@ -1,18 +1,18 @@ package webhook import ( - "fmt" - "reflect" "sync" + "time" + 
"k8s.io/apimachinery/pkg/runtime/schema" webhookutil "k8s.io/apiserver/pkg/util/webhook" clientgoinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" mc "github.com/kplane-dev/apiserver/pkg/multicluster" - "github.com/kplane-dev/apiserver/pkg/multicluster/scopedinformer" + "github.com/kplane-dev/apiserver/pkg/multicluster/typedinformer" + mcinformer "github.com/kplane-dev/informer" ) type Options struct { @@ -40,8 +40,8 @@ type Options struct { // ClientPool caches per-cluster loopback clients. ClientPool *mc.ClientPool - // InformerPool shares informer factories across managers per cluster. - InformerPool *mc.InformerPool + // InformerRegistry provides MultiClusterInformers for resource types. + InformerRegistry *mc.InformerRegistry } type Manager struct { @@ -49,20 +49,11 @@ type Manager struct { mu sync.Mutex clusters map[string]*clusterEnv - - sharedOnce sync.Once - sharedErr error - shared clientgoinformers.SharedInformerFactory - sharedStop <-chan struct{} - sharedOwn chan struct{} - sharedSync chan struct{} } type clusterEnv struct { cid string - stopCh <-chan struct{} - ownCh chan struct{} synced chan struct{} clientset kubernetes.Interface @@ -96,112 +87,79 @@ func (m *Manager) envForCluster(clusterID string) (*clusterEnv, error) { if err != nil { return nil, err } - scoped, err := m.scopedWebhookFactory(clusterID) + + nsMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "namespaces"}) + if err != nil { + return nil, err + } + servicesMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "services"}) + if err != nil { + return nil, err + } + endpointSlicesMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Group: "discovery.k8s.io", Resource: "endpointslices"}) + if err != nil { + return nil, err + } + mutatingMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Group: "admissionregistration.k8s.io", Resource: 
"mutatingwebhookconfigurations"}) if err != nil { return nil, err } - stopCh := make(chan struct{}) + validatingMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Group: "admissionregistration.k8s.io", Resource: "validatingwebhookconfigurations"}) + if err != nil { + return nil, err + } + + factory := typedinformer.NewMCIFactory(typedinformer.MCIFactoryConfig{ + ClusterID: clusterID, + Namespaces: nsMCI, + Services: servicesMCI, + EndpointSlices: endpointSlicesMCI, + MutatingWebhooks: mutatingMCI, + ValidatingWebhooks: validatingMCI, + }) sr := newDirectServiceResolver( - scoped.Core().V1().Services().Lister(), - scoped.Discovery().V1().EndpointSlices().Lister(), + typedinformer.NewServiceLister(servicesMCI, clusterID), + typedinformer.NewEndpointSliceLister(endpointSlicesMCI, clusterID), m.opts.EnableAggregatorRouting, m.opts.Hostname, ) + synced := make(chan struct{}) e := &clusterEnv{ cid: clusterID, - stopCh: stopCh, - ownCh: stopCh, - synced: m.sharedSync, + synced: synced, clientset: cs, - informers: scoped, + informers: factory, serviceResolver: sr, } - // Warm required informers (must happen before Start()). 
- _ = scoped.Core().V1().Namespaces().Informer() - _ = scoped.Core().V1().Services().Informer() - _ = scoped.Discovery().V1().EndpointSlices().Informer() - _ = scoped.Admissionregistration().V1().MutatingWebhookConfigurations().Informer() - _ = scoped.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer() - scoped.Start(stopCh) - - m.clusters[clusterID] = e - return e, nil -} - -func (m *Manager) scopedWebhookFactory(clusterID string) (clientgoinformers.SharedInformerFactory, error) { - shared, err := m.ensureSharedFactory() - if err != nil { - return nil, err - } - return newScopedFactory(clusterID, mc.DefaultClusterAnnotation, shared), nil -} - -func (m *Manager) ensureSharedFactory() (clientgoinformers.SharedInformerFactory, error) { - m.sharedOnce.Do(func() { - if m.opts.BaseLoopbackClientConfig == nil { - m.sharedErr = fmt.Errorf("base loopback config is required for shared webhook factory") - return - } - cs, err := scopedinformer.NewAllClustersKubeClient(m.opts.BaseLoopbackClientConfig) - if err != nil { - m.sharedErr = err - return - } - factory := clientgoinformers.NewSharedInformerFactory(cs, 0) - webhookInformers := []cache.SharedIndexInformer{ - factory.Core().V1().Namespaces().Informer(), - factory.Core().V1().Services().Informer(), - factory.Discovery().V1().EndpointSlices().Informer(), - factory.Admissionregistration().V1().MutatingWebhookConfigurations().Informer(), - factory.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer(), - } - for _, inf := range webhookInformers { - if err := scopedinformer.EnsureClusterIndex(inf, mc.DefaultClusterAnnotation); err != nil { - m.sharedErr = err + // Close synced channel when all MCIs have completed initial sync. 
+ mcis := []*mcinformer.MultiClusterInformer{nsMCI, servicesMCI, endpointSlicesMCI, mutatingMCI, validatingMCI} + go func() { + for { + allSynced := true + for _, mci := range mcis { + if !mci.HasSynced() { + allSynced = false + break + } + } + if allSynced { + close(synced) return } + time.Sleep(50 * time.Millisecond) } - if m.sharedStop == nil { - m.sharedOwn = make(chan struct{}) - m.sharedStop = m.sharedOwn - } - factory.Start(m.sharedStop) - // One shared cache-sync signal for all clusters; scoped informers are projections over shared caches. - m.sharedSync = make(chan struct{}) - go func() { - ok := factory.WaitForCacheSync(m.sharedStop) - if allSynced(ok) { - close(m.sharedSync) - } - }() - m.shared = factory - }) - if m.sharedErr != nil { - return nil, m.sharedErr - } - return m.shared, nil -} + }() -func allSynced(m map[reflect.Type]bool) bool { - for _, v := range m { - if !v { - return false - } - } - return true + m.clusters[clusterID] = e + return e, nil } // StopCluster is test-oriented cleanup; production can leave informers running. 
func (m *Manager) StopCluster(clusterID string) { m.mu.Lock() defer m.mu.Unlock() - if e, ok := m.clusters[clusterID]; ok { - if e.ownCh != nil { - close(e.ownCh) - } - delete(m.clusters, clusterID) - } + delete(m.clusters, clusterID) } diff --git a/pkg/multicluster/admission/webhook/mutating/dispatcher.go b/pkg/multicluster/admission/webhook/mutating/dispatcher.go index b98716b..16120e0 100644 --- a/pkg/multicluster/admission/webhook/mutating/dispatcher.go +++ b/pkg/multicluster/admission/webhook/mutating/dispatcher.go @@ -254,7 +254,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *admiss } } - uid, request, response, err := webhookrequest.CreateAdmissionObjects(attr, invocation) + uid, request, response, err := webhookrequest.CreateAdmissionObjects(generic.EnsureVersionedAttributesUserInfo(attr), invocation) if err != nil { return false, &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not create admission objects: %w", err), Status: apierrors.NewBadRequest("error creating admission objects")} } diff --git a/pkg/multicluster/admission/webhook/scoped_factory.go b/pkg/multicluster/admission/webhook/scoped_factory.go deleted file mode 100644 index 9b74036..0000000 --- a/pkg/multicluster/admission/webhook/scoped_factory.go +++ /dev/null @@ -1,509 +0,0 @@ -package webhook - -import ( - "fmt" - "reflect" - - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - discoveryv1 "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" - admissionregistrationinformers "k8s.io/client-go/informers/admissionregistration" - admissionregistrationinformersv1 "k8s.io/client-go/informers/admissionregistration/v1" - admissionregistrationinformersv1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" - admissionregistrationinformersv1beta1 
"k8s.io/client-go/informers/admissionregistration/v1beta1" - apiserverinternalinformers "k8s.io/client-go/informers/apiserverinternal" - appsinformers "k8s.io/client-go/informers/apps" - autoscalinginformers "k8s.io/client-go/informers/autoscaling" - batchinformers "k8s.io/client-go/informers/batch" - certificatesinformers "k8s.io/client-go/informers/certificates" - coordinationinformers "k8s.io/client-go/informers/coordination" - coreinformers "k8s.io/client-go/informers/core" - coreinformersv1 "k8s.io/client-go/informers/core/v1" - discoveryinformers "k8s.io/client-go/informers/discovery" - discoveryinformersv1 "k8s.io/client-go/informers/discovery/v1" - discoveryinformersv1beta1 "k8s.io/client-go/informers/discovery/v1beta1" - eventsinformers "k8s.io/client-go/informers/events" - extensionsinformers "k8s.io/client-go/informers/extensions" - flowcontrolinformers "k8s.io/client-go/informers/flowcontrol" - internalinformers "k8s.io/client-go/informers/internalinterfaces" - networkinginformers "k8s.io/client-go/informers/networking" - nodeinformers "k8s.io/client-go/informers/node" - policyinformers "k8s.io/client-go/informers/policy" - rbacinformers "k8s.io/client-go/informers/rbac" - resourceinformers "k8s.io/client-go/informers/resource" - schedulinginformers "k8s.io/client-go/informers/scheduling" - storageinformers "k8s.io/client-go/informers/storage" - storagemigrationinformers "k8s.io/client-go/informers/storagemigration" - admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" - corelisters "k8s.io/client-go/listers/core/v1" - discoverylisters "k8s.io/client-go/listers/discovery/v1" - "k8s.io/client-go/tools/cache" - - mc "github.com/kplane-dev/apiserver/pkg/multicluster" - "github.com/kplane-dev/apiserver/pkg/multicluster/scopedinformer" -) - -type scopedFactory struct { - clusterID string - clusterLabelKey string - shared informers.SharedInformerFactory -} - -func newScopedFactory(clusterID, clusterLabelKey string, shared 
informers.SharedInformerFactory) informers.SharedInformerFactory { - if clusterLabelKey == "" { - clusterLabelKey = mc.DefaultClusterAnnotation - } - return &scopedFactory{ - clusterID: clusterID, - clusterLabelKey: clusterLabelKey, - shared: shared, - } -} - -func (f *scopedFactory) Start(stopCh <-chan struct{}) { - _ = stopCh -} - -func (f *scopedFactory) Shutdown() { - // shared informers are owned by manager-level lifecycle. -} - -func (f *scopedFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - out := f.shared.WaitForCacheSync(stopCh) - if out == nil { - out = map[reflect.Type]bool{} - } - if f.shared != nil { - out[reflect.TypeOf(&corev1.Namespace{})] = f.shared.Core().V1().Namespaces().Informer().HasSynced() - out[reflect.TypeOf(&corev1.Service{})] = f.shared.Core().V1().Services().Informer().HasSynced() - out[reflect.TypeOf(&discoveryv1.EndpointSlice{})] = f.shared.Discovery().V1().EndpointSlices().Informer().HasSynced() - out[reflect.TypeOf(&admissionregistrationv1.MutatingWebhookConfiguration{})] = f.shared.Admissionregistration().V1().MutatingWebhookConfigurations().Informer().HasSynced() - out[reflect.TypeOf(&admissionregistrationv1.ValidatingWebhookConfiguration{})] = f.shared.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer().HasSynced() - } - return out -} - -func (f *scopedFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { - return f.shared.ForResource(resource) -} - -func (f *scopedFactory) InformerFor(obj runtime.Object, newFunc internalinformers.NewInformerFunc) cache.SharedIndexInformer { - return f.shared.InformerFor(obj, newFunc) -} - -func (f *scopedFactory) Core() coreinformers.Interface { - return &scopedCoreGroup{f: f} -} - -func (f *scopedFactory) Discovery() discoveryinformers.Interface { - return &scopedDiscoveryGroup{f: f} -} - -func (f *scopedFactory) Admissionregistration() admissionregistrationinformers.Interface { - return 
&scopedAdmissionregistrationGroup{f: f} -} - -func (f *scopedFactory) Internal() apiserverinternalinformers.Interface { return f.shared.Internal() } -func (f *scopedFactory) Apps() appsinformers.Interface { return f.shared.Apps() } -func (f *scopedFactory) Autoscaling() autoscalinginformers.Interface { return f.shared.Autoscaling() } -func (f *scopedFactory) Batch() batchinformers.Interface { return f.shared.Batch() } -func (f *scopedFactory) Certificates() certificatesinformers.Interface { - return f.shared.Certificates() -} -func (f *scopedFactory) Coordination() coordinationinformers.Interface { - return f.shared.Coordination() -} -func (f *scopedFactory) Events() eventsinformers.Interface { return f.shared.Events() } -func (f *scopedFactory) Extensions() extensionsinformers.Interface { return f.shared.Extensions() } -func (f *scopedFactory) Flowcontrol() flowcontrolinformers.Interface { return f.shared.Flowcontrol() } -func (f *scopedFactory) Networking() networkinginformers.Interface { return f.shared.Networking() } -func (f *scopedFactory) Node() nodeinformers.Interface { return f.shared.Node() } -func (f *scopedFactory) Policy() policyinformers.Interface { return f.shared.Policy() } -func (f *scopedFactory) Rbac() rbacinformers.Interface { return f.shared.Rbac() } -func (f *scopedFactory) Resource() resourceinformers.Interface { return f.shared.Resource() } -func (f *scopedFactory) Scheduling() schedulinginformers.Interface { return f.shared.Scheduling() } -func (f *scopedFactory) Storage() storageinformers.Interface { return f.shared.Storage() } -func (f *scopedFactory) Storagemigration() storagemigrationinformers.Interface { - return f.shared.Storagemigration() -} - -type scopedCoreGroup struct{ f *scopedFactory } - -func (g *scopedCoreGroup) V1() coreinformersv1.Interface { return &scopedCoreV1{f: g.f} } - -type scopedCoreV1 struct{ f *scopedFactory } - -func (v *scopedCoreV1) ComponentStatuses() coreinformersv1.ComponentStatusInformer { - return 
v.f.shared.Core().V1().ComponentStatuses() -} -func (v *scopedCoreV1) ConfigMaps() coreinformersv1.ConfigMapInformer { - return v.f.shared.Core().V1().ConfigMaps() -} -func (v *scopedCoreV1) Endpoints() coreinformersv1.EndpointsInformer { - return v.f.shared.Core().V1().Endpoints() -} -func (v *scopedCoreV1) Events() coreinformersv1.EventInformer { - return v.f.shared.Core().V1().Events() -} -func (v *scopedCoreV1) LimitRanges() coreinformersv1.LimitRangeInformer { - return v.f.shared.Core().V1().LimitRanges() -} -func (v *scopedCoreV1) Namespaces() coreinformersv1.NamespaceInformer { - base := v.f.shared.Core().V1().Namespaces().Informer() - return &scopedNamespaceInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedNamespaceLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID}, - } -} -func (v *scopedCoreV1) Nodes() coreinformersv1.NodeInformer { return v.f.shared.Core().V1().Nodes() } -func (v *scopedCoreV1) PersistentVolumes() coreinformersv1.PersistentVolumeInformer { - return v.f.shared.Core().V1().PersistentVolumes() -} -func (v *scopedCoreV1) PersistentVolumeClaims() coreinformersv1.PersistentVolumeClaimInformer { - return v.f.shared.Core().V1().PersistentVolumeClaims() -} -func (v *scopedCoreV1) Pods() coreinformersv1.PodInformer { return v.f.shared.Core().V1().Pods() } -func (v *scopedCoreV1) PodTemplates() coreinformersv1.PodTemplateInformer { - return v.f.shared.Core().V1().PodTemplates() -} -func (v *scopedCoreV1) ReplicationControllers() coreinformersv1.ReplicationControllerInformer { - return v.f.shared.Core().V1().ReplicationControllers() -} -func (v *scopedCoreV1) ResourceQuotas() coreinformersv1.ResourceQuotaInformer { - return v.f.shared.Core().V1().ResourceQuotas() -} -func (v *scopedCoreV1) Secrets() coreinformersv1.SecretInformer { - return v.f.shared.Core().V1().Secrets() -} -func (v *scopedCoreV1) Services() coreinformersv1.ServiceInformer { - base := 
v.f.shared.Core().V1().Services().Informer() - return &scopedServiceInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedServiceLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID}, - } -} -func (v *scopedCoreV1) ServiceAccounts() coreinformersv1.ServiceAccountInformer { - return v.f.shared.Core().V1().ServiceAccounts() -} - -type scopedDiscoveryGroup struct{ f *scopedFactory } - -func (g *scopedDiscoveryGroup) V1() discoveryinformersv1.Interface { - return &scopedDiscoveryV1{f: g.f} -} -func (g *scopedDiscoveryGroup) V1beta1() discoveryinformersv1beta1.Interface { - return g.f.shared.Discovery().V1beta1() -} - -type scopedDiscoveryV1 struct{ f *scopedFactory } - -func (v *scopedDiscoveryV1) EndpointSlices() discoveryinformersv1.EndpointSliceInformer { - base := v.f.shared.Discovery().V1().EndpointSlices().Informer() - return &scopedEndpointSliceInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedEndpointSliceLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID}, - } -} - -type scopedAdmissionregistrationGroup struct{ f *scopedFactory } - -func (g *scopedAdmissionregistrationGroup) V1() admissionregistrationinformersv1.Interface { - return &scopedAdmissionregistrationV1{f: g.f} -} -func (g *scopedAdmissionregistrationGroup) V1alpha1() admissionregistrationinformersv1alpha1.Interface { - return g.f.shared.Admissionregistration().V1alpha1() -} -func (g *scopedAdmissionregistrationGroup) V1beta1() admissionregistrationinformersv1beta1.Interface { - return g.f.shared.Admissionregistration().V1beta1() -} - -type scopedAdmissionregistrationV1 struct{ f *scopedFactory } - -func (v *scopedAdmissionregistrationV1) MutatingWebhookConfigurations() admissionregistrationinformersv1.MutatingWebhookConfigurationInformer { - base := v.f.shared.Admissionregistration().V1().MutatingWebhookConfigurations().Informer() - return 
&scopedMutatingWebhookConfigurationInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedMutatingWebhookConfigurationLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID}, - } -} -func (v *scopedAdmissionregistrationV1) ValidatingWebhookConfigurations() admissionregistrationinformersv1.ValidatingWebhookConfigurationInformer { - base := v.f.shared.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer() - return &scopedValidatingWebhookConfigurationInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedValidatingWebhookConfigurationLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID}, - } -} -func (v *scopedAdmissionregistrationV1) ValidatingAdmissionPolicies() admissionregistrationinformersv1.ValidatingAdmissionPolicyInformer { - return v.f.shared.Admissionregistration().V1().ValidatingAdmissionPolicies() -} -func (v *scopedAdmissionregistrationV1) ValidatingAdmissionPolicyBindings() admissionregistrationinformersv1.ValidatingAdmissionPolicyBindingInformer { - return v.f.shared.Admissionregistration().V1().ValidatingAdmissionPolicyBindings() -} - -func newFilteredSharedIndexInformer(shared cache.SharedIndexInformer, clusterID, clusterLabelKey string) cache.SharedIndexInformer { - return scopedinformer.NewFilteredSharedIndexInformer(shared, clusterID, clusterLabelKey) -} - -func objectCluster(obj interface{}, clusterLabelKey string) string { - return scopedinformer.ObjectCluster(obj, clusterLabelKey) -} - -func filteredByCluster(indexer cache.Indexer, clusterID string) []interface{} { - return scopedinformer.FilteredByCluster(indexer, clusterID) -} - -type scopedNamespaceInformer struct { - informer cache.SharedIndexInformer - lister corelisters.NamespaceLister -} - -func (i *scopedNamespaceInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedNamespaceInformer) Lister() 
corelisters.NamespaceLister { return i.lister } - -type scopedServiceInformer struct { - informer cache.SharedIndexInformer - lister corelisters.ServiceLister -} - -func (i *scopedServiceInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedServiceInformer) Lister() corelisters.ServiceLister { return i.lister } - -type scopedEndpointSliceInformer struct { - informer cache.SharedIndexInformer - lister discoverylisters.EndpointSliceLister -} - -func (i *scopedEndpointSliceInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedEndpointSliceInformer) Lister() discoverylisters.EndpointSliceLister { - return i.lister -} - -type scopedMutatingWebhookConfigurationInformer struct { - informer cache.SharedIndexInformer - lister admissionregistrationlisters.MutatingWebhookConfigurationLister -} - -func (i *scopedMutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return i.informer -} -func (i *scopedMutatingWebhookConfigurationInformer) Lister() admissionregistrationlisters.MutatingWebhookConfigurationLister { - return i.lister -} - -type scopedValidatingWebhookConfigurationInformer struct { - informer cache.SharedIndexInformer - lister admissionregistrationlisters.ValidatingWebhookConfigurationLister -} - -func (i *scopedValidatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return i.informer -} -func (i *scopedValidatingWebhookConfigurationInformer) Lister() admissionregistrationlisters.ValidatingWebhookConfigurationLister { - return i.lister -} - -type scopedNamespaceLister struct { - indexer cache.Indexer - clusterID string -} - -func (l *scopedNamespaceLister) List(sel labels.Selector) (ret []*corev1.Namespace, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Namespace) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, 
obj) - } - } - return ret, nil -} - -func (l *scopedNamespaceLister) Get(name string) (*corev1.Namespace, error) { - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Namespace) - if !ok { - continue - } - if obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("namespace %q not found", name) -} - -type scopedServiceLister struct { - indexer cache.Indexer - clusterID string -} - -func (l *scopedServiceLister) List(sel labels.Selector) (ret []*corev1.Service, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Service) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} - -func (l *scopedServiceLister) Services(namespace string) corelisters.ServiceNamespaceLister { - return &scopedServiceNamespaceLister{parent: l, namespace: namespace} -} - -type scopedServiceNamespaceLister struct { - parent *scopedServiceLister - namespace string -} - -func (l *scopedServiceNamespaceLister) List(sel labels.Selector) (ret []*corev1.Service, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} - -func (l *scopedServiceNamespaceLister) Get(name string) (*corev1.Service, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("service %s/%s not found", l.namespace, name) -} - -type scopedEndpointSliceLister struct { - indexer cache.Indexer - clusterID string -} - -func (l *scopedEndpointSliceLister) List(sel labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := 
it.(*discoveryv1.EndpointSlice) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} - -func (l *scopedEndpointSliceLister) EndpointSlices(namespace string) discoverylisters.EndpointSliceNamespaceLister { - return &scopedEndpointSliceNamespaceLister{parent: l, namespace: namespace} -} - -type scopedEndpointSliceNamespaceLister struct { - parent *scopedEndpointSliceLister - namespace string -} - -func (l *scopedEndpointSliceNamespaceLister) List(sel labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} - -func (l *scopedEndpointSliceNamespaceLister) Get(name string) (*discoveryv1.EndpointSlice, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("endpointslice %s/%s not found", l.namespace, name) -} - -type scopedMutatingWebhookConfigurationLister struct { - indexer cache.Indexer - clusterID string -} - -func (l *scopedMutatingWebhookConfigurationLister) List(sel labels.Selector) (ret []*admissionregistrationv1.MutatingWebhookConfiguration, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*admissionregistrationv1.MutatingWebhookConfiguration) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} - -func (l *scopedMutatingWebhookConfigurationLister) Get(name string) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*admissionregistrationv1.MutatingWebhookConfiguration) - if !ok { - continue - } - if obj.Name == name { - return obj, nil - } - } - 
return nil, fmt.Errorf("mutatingwebhookconfiguration %q not found", name) -} - -type scopedValidatingWebhookConfigurationLister struct { - indexer cache.Indexer - clusterID string -} - -func (l *scopedValidatingWebhookConfigurationLister) List(sel labels.Selector) (ret []*admissionregistrationv1.ValidatingWebhookConfiguration, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*admissionregistrationv1.ValidatingWebhookConfiguration) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} - -func (l *scopedValidatingWebhookConfigurationLister) Get(name string) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) { - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*admissionregistrationv1.ValidatingWebhookConfiguration) - if !ok { - continue - } - if obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("validatingwebhookconfiguration %q not found", name) -} diff --git a/pkg/multicluster/admission/webhook/validating/dispatcher.go b/pkg/multicluster/admission/webhook/validating/dispatcher.go index 8afd335..c208030 100644 --- a/pkg/multicluster/admission/webhook/validating/dispatcher.go +++ b/pkg/multicluster/admission/webhook/validating/dispatcher.go @@ -255,7 +255,7 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1.ValidatingWeb } } - uid, request, response, err := webhookrequest.CreateAdmissionObjects(versionedAttr, invocation) + uid, request, response, err := webhookrequest.CreateAdmissionObjects(generic.EnsureVersionedAttributesUserInfo(versionedAttr), invocation) if err != nil { return &webhookutil.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("could not create admission objects: %w", err), Status: apierrors.NewBadRequest("error creating admission objects")} } diff --git 
a/pkg/multicluster/apiextensions_informerpool.go b/pkg/multicluster/apiextensions_informerpool.go deleted file mode 100644 index 8f902e1..0000000 --- a/pkg/multicluster/apiextensions_informerpool.go +++ /dev/null @@ -1,108 +0,0 @@ -package multicluster - -import ( - "fmt" - "sync" - "time" - - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" -) - -type APIExtensionsInformerPoolOptions struct { - ClientForCluster func(clusterID string) (apiextensionsclient.Interface, error) - ResyncPeriod time.Duration - StopCh <-chan struct{} - StartOnGet bool -} - -type APIExtensionsInformerPool struct { - opts APIExtensionsInformerPoolOptions - - mu sync.Mutex - clusters map[string]*apiExtensionsInformerEntry -} - -type apiExtensionsInformerEntry struct { - clientset apiextensionsclient.Interface - factory apiextensionsinformers.SharedInformerFactory - stopCh <-chan struct{} - ownedCh chan struct{} -} - -func NewAPIExtensionsInformerPool(opts APIExtensionsInformerPoolOptions) *APIExtensionsInformerPool { - if opts.StartOnGet == false { - // keep explicit - } else { - opts.StartOnGet = true - } - return &APIExtensionsInformerPool{ - opts: opts, - clusters: map[string]*apiExtensionsInformerEntry{}, - } -} - -func NewAPIExtensionsInformerPoolFromClientPool(pool *APIExtensionsClientPool, resync time.Duration, stopCh <-chan struct{}) *APIExtensionsInformerPool { - return NewAPIExtensionsInformerPool(APIExtensionsInformerPoolOptions{ - ClientForCluster: pool.APIExtensionsClientForCluster, - ResyncPeriod: resync, - StopCh: stopCh, - StartOnGet: true, - }) -} - -func (p *APIExtensionsInformerPool) Get(clusterID string) (apiextensionsclient.Interface, apiextensionsinformers.SharedInformerFactory, <-chan struct{}, error) { - p.mu.Lock() - defer p.mu.Unlock() - - if entry, ok := p.clusters[clusterID]; ok { - if p.opts.StartOnGet { - entry.start() - } - return 
entry.clientset, entry.factory, entry.stopCh, nil - } - if p.opts.ClientForCluster == nil { - return nil, nil, nil, ErrMissingAPIExtensionsClientFactory - } - cs, err := p.opts.ClientForCluster(clusterID) - if err != nil { - return nil, nil, nil, err - } - factory := apiextensionsinformers.NewSharedInformerFactory(cs, p.opts.ResyncPeriod) - stopCh := p.opts.StopCh - var ownedCh chan struct{} - if stopCh == nil { - ownedCh = make(chan struct{}) - stopCh = ownedCh - } - entry := &apiExtensionsInformerEntry{ - clientset: cs, - factory: factory, - stopCh: stopCh, - ownedCh: ownedCh, - } - p.clusters[clusterID] = entry - if p.opts.StartOnGet { - entry.start() - } - return entry.clientset, entry.factory, entry.stopCh, nil -} - -func (e *apiExtensionsInformerEntry) start() { - e.factory.Start(e.stopCh) -} - -func (p *APIExtensionsInformerPool) StopCluster(clusterID string) { - p.mu.Lock() - defer p.mu.Unlock() - entry, ok := p.clusters[clusterID] - if !ok { - return - } - if entry.ownedCh != nil { - close(entry.ownedCh) - } - delete(p.clusters, clusterID) -} - -var ErrMissingAPIExtensionsClientFactory = fmt.Errorf("missing apiextensions client factory for informer pool") diff --git a/pkg/multicluster/auth/dispatcher.go b/pkg/multicluster/auth/dispatcher.go index 8333cdc..410d132 100644 --- a/pkg/multicluster/auth/dispatcher.go +++ b/pkg/multicluster/auth/dispatcher.go @@ -2,7 +2,9 @@ package auth import ( "context" + "fmt" "net/http" + "reflect" "strings" mc "github.com/kplane-dev/apiserver/pkg/multicluster" @@ -48,8 +50,8 @@ func (c *ClusterAuthenticator) AuthenticateRequest(req *http.Request) (*authenti } } useRoot := cid == "" || cid == c.rootCluster - if useRoot && c.root != nil { - return c.root.AuthenticateRequest(req) + if useRoot && !isNil(c.root) { + return authenticateSafely(c.root, req, "root") } if c.resolver == nil { return nil, false, nil @@ -58,10 +60,10 @@ func (c *ClusterAuthenticator) AuthenticateRequest(req *http.Request) (*authenti if err != nil { 
return nil, false, err } - if authn == nil { + if isNil(authn) { return nil, false, nil } - return authn.AuthenticateRequest(req) + return authenticateSafely(authn, req, cid) } // ClusterAuthorizer dispatches authorization per cluster. @@ -93,27 +95,33 @@ func NewClusterAuthorizer(rootCluster string, root authorizer.Authorizer, rootRe // Authorize dispatches by cluster context. func (c *ClusterAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { cid := clusterFromContext(ctx) - if cid == "" || (cid == c.rootCluster && c.root != nil) || c.resolver == nil { - if c.root == nil { + if cid == "" || (cid == c.rootCluster && !isNil(c.root)) || c.resolver == nil { + if isNil(c.root) { return authorizer.DecisionNoOpinion, "no root authorizer", nil } - return c.root.Authorize(ctx, a) + if err := validateAttributesForAuthorize(a, "root", authorizerType(c.root)); err != nil { + return authorizer.DecisionDeny, "", err + } + return authorizeSafely(c.root, ctx, a, "root") } authz, _, err := c.resolver.AuthorizerForCluster(cid) if err != nil { - return authorizer.DecisionNoOpinion, "", err + return authorizer.DecisionDeny, "", err } - if authz == nil { + if isNil(authz) { return authorizer.DecisionNoOpinion, "no cluster authorizer", nil } - return authz.Authorize(ctx, a) + if err := validateAttributesForAuthorize(a, cid, authorizerType(authz)); err != nil { + return authorizer.DecisionDeny, "", err + } + return authorizeSafely(authz, ctx, a, cid) } // RulesFor dispatches rule resolution per cluster. 
func (c *ClusterAuthorizer) RulesFor(ctx context.Context, u user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { cid := clusterFromContext(ctx) - if cid == "" || (cid == c.rootCluster && c.rootResolver != nil) || c.resolver == nil { - if c.rootResolver == nil { + if cid == "" || (cid == c.rootCluster && !isNil(c.rootResolver)) || c.resolver == nil { + if isNil(c.rootResolver) { return nil, nil, false, nil } return c.rootResolver.RulesFor(ctx, u, namespace) @@ -122,12 +130,88 @@ func (c *ClusterAuthorizer) RulesFor(ctx context.Context, u user.Info, namespace if err != nil { return nil, nil, false, err } - if resolver == nil { + if isNil(resolver) { return nil, nil, false, nil } return resolver.RulesFor(ctx, u, namespace) } +func isNil(v any) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func authorizeSafely(authz authorizer.Authorizer, ctx context.Context, a authorizer.Attributes, target string) (decision authorizer.Decision, reason string, err error) { + defer func() { + if r := recover(); r != nil { + decision = authorizer.DecisionDeny + reason = "" + err = fmt.Errorf("authorizer panic for cluster %q (type=%s): %v", target, authorizerType(authz), r) + } + }() + return authz.Authorize(ctx, a) +} + +func validateAttributesForAuthorize(a authorizer.Attributes, clusterID, authzType string) error { + if isNil(a) { + return fmt.Errorf("invalid authorization attributes for cluster %q (authorizer=%s): attributes is nil", clusterID, authzType) + } + u, err := userFromAttributes(a) + if err != nil { + return fmt.Errorf("invalid authorization attributes for cluster %q (authorizer=%s): %w", clusterID, authzType, err) + } + if isNil(u) { + return fmt.Errorf("invalid authorization attributes for cluster %q (authorizer=%s): user 
is nil", clusterID, authzType) + } + return nil +} + +func userFromAttributes(a authorizer.Attributes) (u user.Info, err error) { + defer func() { + if r := recover(); r != nil { + u = nil + err = fmt.Errorf("GetUser panic: %v", r) + } + }() + return a.GetUser(), nil +} + +func authorizerType(authz authorizer.Authorizer) string { + if isNil(authz) { + return "" + } + return fmt.Sprintf("%T", authz) +} + +func authenticateSafely(authn authenticator.Request, req *http.Request, target string) (resp *authenticator.Response, ok bool, err error) { + defer func() { + if r := recover(); r != nil { + resp = nil + ok = false + err = fmt.Errorf("authenticator panic for cluster %q (type=%T): %v", target, authn, r) + } + }() + resp, ok, err = authn.AuthenticateRequest(req) + if err != nil || !ok { + return resp, ok, err + } + if resp == nil { + return nil, false, fmt.Errorf("invalid authenticator response for cluster %q (type=%T): response is nil", target, authn) + } + if isNil(resp.User) { + return nil, false, fmt.Errorf("invalid authenticator response for cluster %q (type=%T): user is nil", target, authn) + } + return resp, ok, nil +} + func clusterFromContext(ctx context.Context) string { cid, _, _ := mc.FromContext(ctx) return cid diff --git a/pkg/multicluster/auth/dispatcher_test.go b/pkg/multicluster/auth/dispatcher_test.go index fa30345..ea4d588 100644 --- a/pkg/multicluster/auth/dispatcher_test.go +++ b/pkg/multicluster/auth/dispatcher_test.go @@ -24,6 +24,12 @@ func (f *fakeAuthenticator) AuthenticateRequest(*http.Request) (*authenticator.R return &authenticator.Response{User: &user.DefaultInfo{Name: f.name}}, true, nil } +type badUserAuthenticator struct{} + +func (b *badUserAuthenticator) AuthenticateRequest(*http.Request) (*authenticator.Response, bool, error) { + return &authenticator.Response{}, true, nil +} + type fakeAuthorizer struct { name string called *string @@ -43,6 +49,16 @@ func (f *fakeAuthorizer) RulesFor(ctx context.Context, _ user.Info, _ string) ([ 
return nil, nil, false, nil } +type panicAuthorizer struct{} + +func (p *panicAuthorizer) Authorize(context.Context, authorizer.Attributes) (authorizer.Decision, string, error) { + panic("boom") +} + +func (p *panicAuthorizer) RulesFor(context.Context, user.Info, string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + return nil, nil, false, nil +} + type fakeResolver struct { authn authenticator.Request authz authorizer.Authorizer @@ -102,9 +118,10 @@ func TestClusterAuthorizerDispatch(t *testing.T) { resolver := &fakeResolver{authz: cluster, ruleResolver: cluster, lastCluster: &lastCluster} dispatch := NewClusterAuthorizer("root", root, root, resolver) + attrs := authorizer.AttributesRecord{User: &user.DefaultInfo{Name: "test-user"}} ctx := mc.WithCluster(context.Background(), "root", false) - _, _, _ = dispatch.Authorize(ctx, authorizer.AttributesRecord{}) + _, _, _ = dispatch.Authorize(ctx, attrs) if called != "root" { t.Fatalf("expected root authorizer, got %q", called) } @@ -112,7 +129,7 @@ func TestClusterAuthorizerDispatch(t *testing.T) { called = "" lastCluster = "" ctx = mc.WithCluster(context.Background(), "c-2", false) - _, _, _ = dispatch.Authorize(ctx, authorizer.AttributesRecord{}) + _, _, _ = dispatch.Authorize(ctx, attrs) if called != "cluster" { t.Fatalf("expected cluster authorizer, got %q", called) } @@ -155,3 +172,94 @@ func TestClusterAuthenticatorUsesTokenHintWithoutClusterContext(t *testing.T) { t.Fatalf("expected resolver cluster c-42, got %q", lastCluster) } } + +func TestClusterAuthenticatorRejectsNilUserResponse(t *testing.T) { + dispatch := NewClusterAuthenticator("root", nil, &fakeResolver{ + authn: &badUserAuthenticator{}, + }) + req := httptest.NewRequest("GET", "http://example", nil) + req = req.WithContext(mc.WithCluster(req.Context(), "c-bad-user", false)) + + resp, ok, err := dispatch.AuthenticateRequest(req) + if err == nil { + t.Fatalf("expected error, got nil") + } + if ok { + 
t.Fatalf("expected ok=false for invalid auth response") + } + if resp != nil { + t.Fatalf("expected nil response on invalid auth response") + } +} + +func TestClusterAuthorizerTypedNilDoesNotPanic(t *testing.T) { + var typedNilCluster *fakeAuthorizer + dispatch := NewClusterAuthorizer("root", &fakeAuthorizer{name: "root"}, nil, &fakeResolver{ + authz: typedNilCluster, + ruleResolver: typedNilCluster, + }) + attrs := authorizer.AttributesRecord{User: &user.DefaultInfo{Name: "test-user"}} + + ctx := mc.WithCluster(context.Background(), "c-typed-nil", false) + decision, reason, err := dispatch.Authorize(ctx, attrs) + if err != nil { + t.Fatalf("expected nil error, got %v", err) + } + if decision != authorizer.DecisionNoOpinion { + t.Fatalf("expected DecisionNoOpinion, got %v", decision) + } + if reason != "no cluster authorizer" { + t.Fatalf("expected no cluster authorizer reason, got %q", reason) + } + + _, _, incomplete, err := dispatch.RulesFor(ctx, &user.DefaultInfo{Name: "test"}, "") + if err != nil { + t.Fatalf("expected nil error from RulesFor, got %v", err) + } + if incomplete { + t.Fatalf("expected incomplete=false for missing resolver") + } +} + +func TestClusterAuthorizerRootTypedNilDoesNotPanic(t *testing.T) { + var typedNilRoot *fakeAuthorizer + dispatch := NewClusterAuthorizer("root", typedNilRoot, nil, nil) + + decision, reason, err := dispatch.Authorize(context.Background(), authorizer.AttributesRecord{User: &user.DefaultInfo{Name: "test-user"}}) + if err != nil { + t.Fatalf("expected nil error, got %v", err) + } + if decision != authorizer.DecisionNoOpinion { + t.Fatalf("expected DecisionNoOpinion, got %v", decision) + } + if reason != "no root authorizer" { + t.Fatalf("expected no root authorizer reason, got %q", reason) + } +} + +func TestClusterAuthorizerPanicIsRecovered(t *testing.T) { + dispatch := NewClusterAuthorizer("root", &fakeAuthorizer{name: "root"}, nil, &fakeResolver{ + authz: &panicAuthorizer{}, + }) + attrs := 
authorizer.AttributesRecord{User: &user.DefaultInfo{Name: "test-user"}} + + ctx := mc.WithCluster(context.Background(), "c-panic", false) + decision, _, err := dispatch.Authorize(ctx, attrs) + if err == nil { + t.Fatalf("expected recovered panic error, got nil") + } + if decision != authorizer.DecisionDeny { + t.Fatalf("expected DecisionDeny on panic, got %v", decision) + } +} + +func TestClusterAuthorizerNilUserDenied(t *testing.T) { + dispatch := NewClusterAuthorizer("root", &fakeAuthorizer{name: "root"}, nil, nil) + decision, _, err := dispatch.Authorize(context.Background(), authorizer.AttributesRecord{}) + if err == nil { + t.Fatalf("expected error for nil user, got nil") + } + if decision != authorizer.DecisionDeny { + t.Fatalf("expected DecisionDeny for nil user, got %v", decision) + } +} diff --git a/pkg/multicluster/auth/manager.go b/pkg/multicluster/auth/manager.go index 0738acd..6138a05 100644 --- a/pkg/multicluster/auth/manager.go +++ b/pkg/multicluster/auth/manager.go @@ -3,13 +3,16 @@ package auth import ( "context" "fmt" - "strings" "sync" + "time" mc "github.com/kplane-dev/apiserver/pkg/multicluster" - "github.com/kplane-dev/apiserver/pkg/multicluster/scopedinformer" + "github.com/kplane-dev/apiserver/pkg/multicluster/typedinformer" + mcinformer "github.com/kplane-dev/informer" + mcstorage "github.com/kplane-dev/storage" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" @@ -17,16 +20,13 @@ import ( authzunion "k8s.io/apiserver/pkg/authorization/union" "k8s.io/apiserver/pkg/server/egressselector" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" 
"k8s.io/kubernetes/pkg/controller/serviceaccount" "k8s.io/kubernetes/pkg/features" - rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" + rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" rbacauthorizer "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap" ) @@ -41,7 +41,7 @@ type Options struct { EgressSelector *egressselector.EgressSelector APIServerID string ClientPool *mc.ClientPool - InformerPool *mc.InformerPool + InformerRegistry *mc.InformerRegistry } // Manager builds per-cluster authenticators and authorizers on demand. @@ -52,19 +52,15 @@ type Manager struct { mu sync.Mutex clusters map[string]*clusterEnv - sharedOnce sync.Once - sharedErr error - sharedAuth informers.SharedInformerFactory - rbacStore *rbacProjectionStore - sharedStop <-chan struct{} - sharedOwn chan struct{} + rbacOnce sync.Once + rbacErr error + rbacStore *rbacProjectionStore } type clusterEnv struct { cid string clientset kubernetes.Interface - informers informers.SharedInformerFactory authenticator authenticator.Request authorizer authorizer.Authorizer @@ -108,10 +104,6 @@ func (m *Manager) AuthorizerForCluster(clusterID string) (authorizer.Authorizer, func (m *Manager) StopCluster(clusterID string) { m.mu.Lock() defer m.mu.Unlock() - _, ok := m.clusters[clusterID] - if !ok { - return - } delete(m.clusters, clusterID) } @@ -121,185 +113,146 @@ func (m *Manager) envForCluster(clusterID string) (*clusterEnv, error) { m.mu.Unlock() return env, nil } - if m.opts.ClientPool == nil { m.mu.Unlock() return nil, fmt.Errorf("loopback client pool is required for cluster auth") } + m.mu.Unlock() cs, err := m.opts.ClientPool.KubeClientForCluster(clusterID) if err != nil { - m.mu.Unlock() return nil, err } - var ( - scopedFactory informers.SharedInformerFactory - authn authenticator.Request - ) - var ( - authz authorizer.Authorizer - 
resolver authorizer.RuleResolver - ) - if m.useSharedRBACAuthorizer() { - listers, err := m.coreListersForCluster(clusterID) - if err != nil { - m.mu.Unlock() - return nil, err - } - authn, err = buildAuthenticatorWithCoreListers(m.ctx, m.opts, cs, listers) - if err != nil { - m.mu.Unlock() - return nil, err - } - authz, resolver, err = m.buildSharedRBACAuthorizerForCluster(clusterID) - } else { - scopedFactory, err = m.scopedAuthFactory(clusterID) - if err != nil { - m.mu.Unlock() - return nil, err - } - authn, err = buildAuthenticator(m.ctx, m.opts, cs, scopedFactory) - if err != nil { - m.mu.Unlock() - return nil, err - } - authz, resolver, err = buildAuthorizer(m.ctx, m.opts, scopedFactory) + // Build authenticator with MCI-backed core listers. + listers, err := m.coreListersForCluster(clusterID) + if err != nil { + return nil, err } + authn, err := buildAuthenticatorWithCoreListers(m.ctx, m.opts, cs, listers) if err != nil { - m.mu.Unlock() return nil, err } + // Build authorizer via RBAC projection store with MCI event handlers. 
+ if err := m.ensureRBACProjection(); err != nil { + return nil, err + } + authz, ruleResolver := m.buildRBACAuthorizerForCluster(clusterID) + env := &clusterEnv{ cid: clusterID, clientset: cs, - informers: scopedFactory, authenticator: authn, authorizer: authz, - ruleResolver: resolver, + ruleResolver: ruleResolver, } + m.mu.Lock() + if existing, ok := m.clusters[clusterID]; ok { + m.mu.Unlock() + return existing, nil + } m.clusters[clusterID] = env m.mu.Unlock() return env, nil } -func (m *Manager) scopedAuthFactory(clusterID string) (informers.SharedInformerFactory, error) { - shared, err := m.ensureSharedAuthFactory() +func (m *Manager) coreListersForCluster(clusterID string) (*coreAuthListers, error) { + if m.opts.InformerRegistry == nil { + return nil, fmt.Errorf("informer registry is required for core auth listers") + } + secretsMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "secrets"}) if err != nil { return nil, err } - return newScopedFactory(clusterID, mc.DefaultClusterAnnotation, shared, m.rbacStore), nil + saMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "serviceaccounts"}) + if err != nil { + return nil, err + } + podsMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "pods"}) + if err != nil { + return nil, err + } + nodesMCI, err := m.opts.InformerRegistry.Get(schema.GroupResource{Resource: "nodes"}) + if err != nil { + return nil, err + } + return &coreAuthListers{ + secrets: typedinformer.NewSecretLister(secretsMCI, clusterID), + serviceAccounts: typedinformer.NewServiceAccountLister(saMCI, clusterID), + pods: typedinformer.NewPodLister(podsMCI, clusterID), + nodes: typedinformer.NewNodeLister(nodesMCI, clusterID), + }, nil } -func (m *Manager) ensureSharedAuthFactory() (informers.SharedInformerFactory, error) { - m.sharedOnce.Do(func() { - if m.opts.BaseLoopbackClientConfig == nil { - m.sharedErr = fmt.Errorf("base loopback config is required for shared auth factory") - return - 
} - cs, err := scopedinformer.NewAllClustersKubeClient(m.opts.BaseLoopbackClientConfig) - if err != nil { - m.sharedErr = err +// ensureRBACProjection registers MCI event handlers on RBAC resource types +// to incrementally populate the projection store. +func (m *Manager) ensureRBACProjection() error { + m.rbacOnce.Do(func() { + if m.opts.InformerRegistry == nil { + m.rbacErr = fmt.Errorf("informer registry is required for RBAC projection") return } - factory := informers.NewSharedInformerFactory(cs, 0) - // Warm and index shared auth-critical informers once. - authInformers := []cache.SharedIndexInformer{ - factory.Core().V1().Secrets().Informer(), - factory.Core().V1().ServiceAccounts().Informer(), - factory.Core().V1().Pods().Informer(), - factory.Core().V1().Nodes().Informer(), - factory.Rbac().V1().Roles().Informer(), - factory.Rbac().V1().RoleBindings().Informer(), - factory.Rbac().V1().ClusterRoles().Informer(), - factory.Rbac().V1().ClusterRoleBindings().Informer(), + m.rbacStore = newRBACProjectionStore() + + rbacResources := []schema.GroupResource{ + {Group: "rbac.authorization.k8s.io", Resource: "roles"}, + {Group: "rbac.authorization.k8s.io", Resource: "rolebindings"}, + {Group: "rbac.authorization.k8s.io", Resource: "clusterroles"}, + {Group: "rbac.authorization.k8s.io", Resource: "clusterrolebindings"}, } - for _, inf := range authInformers { - if err := scopedinformer.EnsureClusterIndex(inf, mc.DefaultClusterAnnotation); err != nil { - m.sharedErr = err + + mcis := make([]*mcinformer.MultiClusterInformer, 0, len(rbacResources)) + for _, gr := range rbacResources { + mci, err := m.opts.InformerRegistry.Get(gr) + if err != nil { + m.rbacErr = fmt.Errorf("failed to get MCI for %s: %w", gr, err) return } + mci.AddEventHandler(mcinformer.MultiClusterEventHandlerFuncs{ + AddFunc: func(obj *mcstorage.ObjectWithClusterIdentity, _ bool) { + m.rbacStore.upsertWithCluster(obj.Object, obj.ClusterID) + }, + UpdateFunc: func(_, newObj 
*mcstorage.ObjectWithClusterIdentity) { + m.rbacStore.upsertWithCluster(newObj.Object, newObj.ClusterID) + }, + DeleteFunc: func(obj *mcstorage.ObjectWithClusterIdentity) { + m.rbacStore.deleteWithCluster(obj.Object, obj.ClusterID) + }, + }) + mcis = append(mcis, mci) } - rbacStore := newRBACProjectionStore(mc.DefaultClusterAnnotation) - if err := registerRBACProjectionHandlers( - rbacStore, - factory.Rbac().V1().Roles().Informer(), - factory.Rbac().V1().RoleBindings().Informer(), - factory.Rbac().V1().ClusterRoles().Informer(), - factory.Rbac().V1().ClusterRoleBindings().Informer(), - ); err != nil { - m.sharedErr = err - return + + // Wait for all MCIs to complete their initial list before backfilling. + // HasSynced becomes true after the cacher sends all existing objects + // via the initial watch stream. + for _, mci := range mcis { + for !mci.HasSynced() { + select { + case <-m.ctx.Done(): + m.rbacErr = m.ctx.Err() + return + default: + time.Sleep(10 * time.Millisecond) + } + } } - if m.sharedStop == nil { - m.sharedOwn = make(chan struct{}) - m.sharedStop = m.sharedOwn + + // Backfill existing objects — event handlers registered above only + // receive future events; objects already in the store must be seeded. 
+ for _, mci := range mcis { + for _, clusterID := range mci.Clusters() { + for _, obj := range mci.List(clusterID) { + m.rbacStore.upsertWithCluster(obj, clusterID) + } + } } - factory.Start(m.sharedStop) - m.sharedAuth = factory - m.rbacStore = rbacStore }) - if m.sharedErr != nil { - return nil, m.sharedErr - } - return m.sharedAuth, nil -} - -type coreAuthListers struct { - secrets corelisters.SecretLister - serviceAccounts corelisters.ServiceAccountLister - pods corelisters.PodLister - nodes corelisters.NodeLister -} - -func (m *Manager) coreListersForCluster(clusterID string) (*coreAuthListers, error) { - shared, err := m.ensureSharedAuthFactory() - if err != nil { - return nil, err - } - return &coreAuthListers{ - secrets: &scopedSecretLister{ - indexer: shared.Core().V1().Secrets().Informer().GetIndexer(), - clusterID: clusterID, - clusterLabelKey: mc.DefaultClusterAnnotation, - }, - serviceAccounts: &scopedServiceAccountLister{ - indexer: shared.Core().V1().ServiceAccounts().Informer().GetIndexer(), - clusterID: clusterID, - clusterLabelKey: mc.DefaultClusterAnnotation, - }, - pods: &scopedPodLister{ - indexer: shared.Core().V1().Pods().Informer().GetIndexer(), - clusterID: clusterID, - clusterLabelKey: mc.DefaultClusterAnnotation, - }, - nodes: &scopedNodeLister{ - indexer: shared.Core().V1().Nodes().Informer().GetIndexer(), - clusterID: clusterID, - clusterLabelKey: mc.DefaultClusterAnnotation, - }, - }, nil + return m.rbacErr } -func (m *Manager) useSharedRBACAuthorizer() bool { - if m == nil || m.opts.Authorization == nil { - return false - } - if len(m.opts.Authorization.Modes) != 1 { - return false - } - return strings.EqualFold(m.opts.Authorization.Modes[0], "RBAC") -} - -func (m *Manager) buildSharedRBACAuthorizerForCluster(clusterID string) (authorizer.Authorizer, authorizer.RuleResolver, error) { - if _, err := m.ensureSharedAuthFactory(); err != nil { - return nil, nil, err - } - if m.rbacStore == nil { - return nil, nil, fmt.Errorf("shared RBAC 
projection store is not initialized") - } +func (m *Manager) buildRBACAuthorizerForCluster(clusterID string) (authorizer.Authorizer, authorizer.RuleResolver) { resolver := &clusterAwareRBACDataSource{ store: m.rbacStore, defaultCluster: clusterID, @@ -307,8 +260,14 @@ func (m *Manager) buildSharedRBACAuthorizerForCluster(clusterID string) (authori } rbacAuthz := rbacauthorizer.New(resolver, resolver, resolver, resolver) superuser := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup) - // Match upstream shape: privileged groups short-circuit before RBAC checks. - return authzunion.New(superuser, rbacAuthz), rbacAuthz, nil + return authzunion.New(superuser, rbacAuthz), rbacAuthz +} + +type coreAuthListers struct { + secrets corelisters.SecretLister + serviceAccounts corelisters.ServiceAccountLister + pods corelisters.PodLister + nodes corelisters.NodeLister } type clusterAwareRBACDataSource struct { @@ -388,55 +347,6 @@ var _ rbacregistryvalidation.RoleBindingLister = (*clusterAwareRBACDataSource)(n var _ rbacregistryvalidation.ClusterRoleGetter = (*clusterAwareRBACDataSource)(nil) var _ rbacregistryvalidation.ClusterRoleBindingLister = (*clusterAwareRBACDataSource)(nil) -func buildAuthenticator(ctx context.Context, opts Options, clientset kubernetes.Interface, informers informers.SharedInformerFactory) (authenticator.Request, error) { - if opts.Authentication == nil { - return nil, nil - } - authConfig, err := opts.Authentication.ToAuthenticationConfig() - if err != nil { - return nil, err - } - - if opts.Authentication.ServiceAccounts != nil && opts.Authentication.ServiceAccounts.OptionalTokenGetter != nil { - authConfig.ServiceAccountTokenGetter = opts.Authentication.ServiceAccounts.OptionalTokenGetter(informers) - } else { - var nodeLister v1.NodeLister - if utilfeature.DefaultFeatureGate.Enabled(features.ServiceAccountTokenNodeBindingValidation) { - nodeLister = informers.Core().V1().Nodes().Lister() - } - - authConfig.ServiceAccountTokenGetter = 
serviceaccount.NewGetterFromClient( - clientset, - informers.Core().V1().Secrets().Lister(), - informers.Core().V1().ServiceAccounts().Lister(), - informers.Core().V1().Pods().Lister(), - nodeLister, - ) - } - authConfig.SecretsWriter = clientset.CoreV1() - - if authConfig.BootstrapToken { - authConfig.BootstrapTokenAuthenticator = bootstrap.NewTokenAuthenticator( - informers.Core().V1().Secrets().Lister().Secrets(metav1.NamespaceSystem), - ) - } - - if opts.EgressSelector != nil { - egressDialer, err := opts.EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext()) - if err != nil { - return nil, err - } - authConfig.CustomDial = egressDialer - authConfig.EgressLookup = opts.EgressSelector.Lookup - } - - authenticator, _, _, _, err := authConfig.New(ctx) - if err != nil { - return nil, err - } - return authenticator, nil -} - func buildAuthenticatorWithCoreListers(ctx context.Context, opts Options, clientset kubernetes.Interface, listers *coreAuthListers) (authenticator.Request, error) { if opts.Authentication == nil { return nil, nil @@ -452,7 +362,7 @@ func buildAuthenticatorWithCoreListers(ctx context.Context, opts Options, client return nil, err } - var nodeLister v1.NodeLister + var nodeLister corelisters.NodeLister if utilfeature.DefaultFeatureGate.Enabled(features.ServiceAccountTokenNodeBindingValidation) { nodeLister = listers.nodes } @@ -484,24 +394,3 @@ func buildAuthenticatorWithCoreListers(ctx context.Context, opts Options, client } return authenticator, nil } - -func buildAuthorizer(ctx context.Context, opts Options, informers informers.SharedInformerFactory) (authorizer.Authorizer, authorizer.RuleResolver, error) { - if opts.Authorization == nil { - return nil, nil, nil - } - authzConfig, err := opts.Authorization.ToAuthorizationConfig(informers) - if err != nil { - return nil, nil, err - } - if authzConfig == nil { - return nil, nil, nil - } - if opts.EgressSelector != nil { - egressDialer, err := 
opts.EgressSelector.Lookup(egressselector.ControlPlane.AsNetworkContext()) - if err != nil { - return nil, nil, err - } - authzConfig.CustomDial = egressDialer - } - return authzConfig.New(ctx, opts.APIServerID) -} diff --git a/pkg/multicluster/auth/scoped_factory.go b/pkg/multicluster/auth/scoped_factory.go index 2eb46af..969026c 100644 --- a/pkg/multicluster/auth/scoped_factory.go +++ b/pkg/multicluster/auth/scoped_factory.go @@ -1,300 +1,25 @@ package auth import ( - "fmt" - "reflect" "strings" "sync" - corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" - admissionregistrationinformers "k8s.io/client-go/informers/admissionregistration" - apiserverinternalinformers "k8s.io/client-go/informers/apiserverinternal" - appsinformers "k8s.io/client-go/informers/apps" - autoscalinginformers "k8s.io/client-go/informers/autoscaling" - batchinformers "k8s.io/client-go/informers/batch" - certificatesinformers "k8s.io/client-go/informers/certificates" - coordinationinformers "k8s.io/client-go/informers/coordination" - coreinformers "k8s.io/client-go/informers/core" - coreinformersv1 "k8s.io/client-go/informers/core/v1" - discoveryinformers "k8s.io/client-go/informers/discovery" - eventsinformers "k8s.io/client-go/informers/events" - extensionsinformers "k8s.io/client-go/informers/extensions" - flowcontrolinformers "k8s.io/client-go/informers/flowcontrol" - internalinformers "k8s.io/client-go/informers/internalinterfaces" - networkinginformers "k8s.io/client-go/informers/networking" - nodeinformers "k8s.io/client-go/informers/node" - policyinformers "k8s.io/client-go/informers/policy" - rbacinformers "k8s.io/client-go/informers/rbac" - rbacinformersv1 "k8s.io/client-go/informers/rbac/v1" - rbacinformersv1alpha1 "k8s.io/client-go/informers/rbac/v1alpha1" - rbacinformersv1beta1 
"k8s.io/client-go/informers/rbac/v1beta1" - resourceinformers "k8s.io/client-go/informers/resource" - schedulinginformers "k8s.io/client-go/informers/scheduling" - storageinformers "k8s.io/client-go/informers/storage" - storagemigrationinformers "k8s.io/client-go/informers/storagemigration" - corelisters "k8s.io/client-go/listers/core/v1" - rbaclisters "k8s.io/client-go/listers/rbac/v1" - "k8s.io/client-go/tools/cache" - - mc "github.com/kplane-dev/apiserver/pkg/multicluster" - "github.com/kplane-dev/apiserver/pkg/multicluster/scopedinformer" + "k8s.io/kubernetes/pkg/api/legacyscheme" ) -type scopedFactory struct { - clusterID string - clusterLabelKey string - shared informers.SharedInformerFactory - rbacStore *rbacProjectionStore -} - -func newScopedFactory(clusterID, clusterLabelKey string, shared informers.SharedInformerFactory, rbacStore *rbacProjectionStore) informers.SharedInformerFactory { - if clusterLabelKey == "" { - clusterLabelKey = mc.DefaultClusterAnnotation - } - if rbacStore == nil { - rbacStore = newRBACProjectionStore(clusterLabelKey) - } - return &scopedFactory{ - clusterID: clusterID, - clusterLabelKey: clusterLabelKey, - shared: shared, - rbacStore: rbacStore, - } -} - -func (f *scopedFactory) Start(stopCh <-chan struct{}) { - _ = stopCh -} - -func (f *scopedFactory) Shutdown() { - // shared informers are owned by manager-level lifecycle. -} - -func (f *scopedFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - out := f.shared.WaitForCacheSync(stopCh) - if out == nil { - out = map[reflect.Type]bool{} - } - // shared informers are started/synced by manager startup; include scoped essentials. 
- if f.shared != nil { - out[reflect.TypeOf(&corev1.Secret{})] = f.shared.Core().V1().Secrets().Informer().HasSynced() - out[reflect.TypeOf(&corev1.ServiceAccount{})] = f.shared.Core().V1().ServiceAccounts().Informer().HasSynced() - out[reflect.TypeOf(&corev1.Pod{})] = f.shared.Core().V1().Pods().Informer().HasSynced() - out[reflect.TypeOf(&corev1.Node{})] = f.shared.Core().V1().Nodes().Informer().HasSynced() - out[reflect.TypeOf(&rbacv1.Role{})] = f.shared.Rbac().V1().Roles().Informer().HasSynced() - out[reflect.TypeOf(&rbacv1.RoleBinding{})] = f.shared.Rbac().V1().RoleBindings().Informer().HasSynced() - out[reflect.TypeOf(&rbacv1.ClusterRole{})] = f.shared.Rbac().V1().ClusterRoles().Informer().HasSynced() - out[reflect.TypeOf(&rbacv1.ClusterRoleBinding{})] = f.shared.Rbac().V1().ClusterRoleBindings().Informer().HasSynced() - } - return out -} - -func (f *scopedFactory) ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) { - return f.shared.ForResource(resource) -} - -func (f *scopedFactory) InformerFor(obj runtime.Object, newFunc internalinformers.NewInformerFunc) cache.SharedIndexInformer { - return f.shared.InformerFor(obj, newFunc) -} - -func (f *scopedFactory) Core() coreinformers.Interface { - return &scopedCoreGroup{f: f} -} - -func (f *scopedFactory) Rbac() rbacinformers.Interface { - return &scopedRbacGroup{f: f} -} - -func (f *scopedFactory) Admissionregistration() admissionregistrationinformers.Interface { - return f.shared.Admissionregistration() -} -func (f *scopedFactory) Internal() apiserverinternalinformers.Interface { return f.shared.Internal() } -func (f *scopedFactory) Apps() appsinformers.Interface { return f.shared.Apps() } -func (f *scopedFactory) Autoscaling() autoscalinginformers.Interface { - return f.shared.Autoscaling() -} -func (f *scopedFactory) Batch() batchinformers.Interface { return f.shared.Batch() } -func (f *scopedFactory) Certificates() certificatesinformers.Interface { - return 
f.shared.Certificates() -} -func (f *scopedFactory) Coordination() coordinationinformers.Interface { - return f.shared.Coordination() -} -func (f *scopedFactory) Discovery() discoveryinformers.Interface { return f.shared.Discovery() } -func (f *scopedFactory) Events() eventsinformers.Interface { return f.shared.Events() } -func (f *scopedFactory) Extensions() extensionsinformers.Interface { - return f.shared.Extensions() -} -func (f *scopedFactory) Flowcontrol() flowcontrolinformers.Interface { - return f.shared.Flowcontrol() -} -func (f *scopedFactory) Networking() networkinginformers.Interface { - return f.shared.Networking() -} -func (f *scopedFactory) Node() nodeinformers.Interface { return f.shared.Node() } -func (f *scopedFactory) Policy() policyinformers.Interface { return f.shared.Policy() } -func (f *scopedFactory) Resource() resourceinformers.Interface { - return f.shared.Resource() -} -func (f *scopedFactory) Scheduling() schedulinginformers.Interface { - return f.shared.Scheduling() -} -func (f *scopedFactory) Storage() storageinformers.Interface { - return f.shared.Storage() -} -func (f *scopedFactory) Storagemigration() storagemigrationinformers.Interface { - return f.shared.Storagemigration() -} - -type scopedCoreGroup struct{ f *scopedFactory } - -func (g *scopedCoreGroup) V1() coreinformersv1.Interface { - return &scopedCoreV1{f: g.f} -} - -type scopedCoreV1 struct{ f *scopedFactory } - -func (v *scopedCoreV1) ComponentStatuses() coreinformersv1.ComponentStatusInformer { - return v.f.shared.Core().V1().ComponentStatuses() -} -func (v *scopedCoreV1) ConfigMaps() coreinformersv1.ConfigMapInformer { - return v.f.shared.Core().V1().ConfigMaps() -} -func (v *scopedCoreV1) Endpoints() coreinformersv1.EndpointsInformer { - return v.f.shared.Core().V1().Endpoints() -} -func (v *scopedCoreV1) Events() coreinformersv1.EventInformer { - return v.f.shared.Core().V1().Events() -} -func (v *scopedCoreV1) LimitRanges() coreinformersv1.LimitRangeInformer { - 
return v.f.shared.Core().V1().LimitRanges() -} -func (v *scopedCoreV1) Namespaces() coreinformersv1.NamespaceInformer { - return v.f.shared.Core().V1().Namespaces() -} -func (v *scopedCoreV1) Nodes() coreinformersv1.NodeInformer { - base := v.f.shared.Core().V1().Nodes().Informer() - return &scopedNodeInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedNodeLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID, clusterLabelKey: v.f.clusterLabelKey}, - } -} -func (v *scopedCoreV1) PersistentVolumes() coreinformersv1.PersistentVolumeInformer { - return v.f.shared.Core().V1().PersistentVolumes() -} -func (v *scopedCoreV1) PersistentVolumeClaims() coreinformersv1.PersistentVolumeClaimInformer { - return v.f.shared.Core().V1().PersistentVolumeClaims() -} -func (v *scopedCoreV1) Pods() coreinformersv1.PodInformer { - base := v.f.shared.Core().V1().Pods().Informer() - return &scopedPodInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedPodLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID, clusterLabelKey: v.f.clusterLabelKey}, - } -} -func (v *scopedCoreV1) PodTemplates() coreinformersv1.PodTemplateInformer { - return v.f.shared.Core().V1().PodTemplates() -} -func (v *scopedCoreV1) ReplicationControllers() coreinformersv1.ReplicationControllerInformer { - return v.f.shared.Core().V1().ReplicationControllers() -} -func (v *scopedCoreV1) ResourceQuotas() coreinformersv1.ResourceQuotaInformer { - return v.f.shared.Core().V1().ResourceQuotas() -} -func (v *scopedCoreV1) Secrets() coreinformersv1.SecretInformer { - base := v.f.shared.Core().V1().Secrets().Informer() - return &scopedSecretInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedSecretLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID, clusterLabelKey: v.f.clusterLabelKey}, - } -} -func (v *scopedCoreV1) 
Services() coreinformersv1.ServiceInformer { - return v.f.shared.Core().V1().Services() -} -func (v *scopedCoreV1) ServiceAccounts() coreinformersv1.ServiceAccountInformer { - base := v.f.shared.Core().V1().ServiceAccounts().Informer() - return &scopedServiceAccountInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedServiceAccountLister{indexer: base.GetIndexer(), clusterID: v.f.clusterID, clusterLabelKey: v.f.clusterLabelKey}, - } -} - -type scopedRbacGroup struct{ f *scopedFactory } - -func (g *scopedRbacGroup) V1() rbacinformersv1.Interface { - return &scopedRbacV1{f: g.f} -} -func (g *scopedRbacGroup) V1alpha1() rbacinformersv1alpha1.Interface { - return g.f.shared.Rbac().V1alpha1() -} -func (g *scopedRbacGroup) V1beta1() rbacinformersv1beta1.Interface { - return g.f.shared.Rbac().V1beta1() -} - -type scopedRbacV1 struct{ f *scopedFactory } - -func (v *scopedRbacV1) ClusterRoleBindings() rbacinformersv1.ClusterRoleBindingInformer { - base := v.f.shared.Rbac().V1().ClusterRoleBindings().Informer() - return &scopedClusterRoleBindingInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedClusterRoleBindingLister{store: v.f.rbacStore, clusterID: v.f.clusterID}, - } -} -func (v *scopedRbacV1) ClusterRoles() rbacinformersv1.ClusterRoleInformer { - base := v.f.shared.Rbac().V1().ClusterRoles().Informer() - return &scopedClusterRoleInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedClusterRoleLister{store: v.f.rbacStore, clusterID: v.f.clusterID}, - } -} -func (v *scopedRbacV1) RoleBindings() rbacinformersv1.RoleBindingInformer { - base := v.f.shared.Rbac().V1().RoleBindings().Informer() - return &scopedRoleBindingInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedRoleBindingLister{store: v.f.rbacStore, clusterID: v.f.clusterID}, - 
} -} -func (v *scopedRbacV1) Roles() rbacinformersv1.RoleInformer { - base := v.f.shared.Rbac().V1().Roles().Informer() - return &scopedRoleInformer{ - informer: newFilteredSharedIndexInformer(base, v.f.clusterID, v.f.clusterLabelKey), - lister: &scopedRoleLister{store: v.f.rbacStore, clusterID: v.f.clusterID}, - } -} - -func newFilteredSharedIndexInformer(shared cache.SharedIndexInformer, clusterID, clusterLabelKey string) cache.SharedIndexInformer { - return scopedinformer.NewFilteredSharedIndexInformer(shared, clusterID, clusterLabelKey) -} - -func objectCluster(obj interface{}, clusterLabelKey string) string { - return scopedinformer.ObjectCluster(obj, clusterLabelKey) -} - -func filteredByCluster(indexer cache.Indexer, clusterID string) []interface{} { - return scopedinformer.FilteredByCluster(indexer, clusterID) -} - type rbacProjectionStore struct { mu sync.RWMutex - clusterLabelKey string clusterRoles map[string]*rbacv1.ClusterRole clusterBindings map[string]*rbacv1.ClusterRoleBinding roles map[string]*rbacv1.Role roleBindings map[string]*rbacv1.RoleBinding } -func newRBACProjectionStore(clusterLabelKey string) *rbacProjectionStore { - if clusterLabelKey == "" { - clusterLabelKey = mc.DefaultClusterAnnotation - } +func newRBACProjectionStore() *rbacProjectionStore { return &rbacProjectionStore{ - clusterLabelKey: clusterLabelKey, clusterRoles: map[string]*rbacv1.ClusterRole{}, clusterBindings: map[string]*rbacv1.ClusterRoleBinding{}, roles: map[string]*rbacv1.Role{}, @@ -302,159 +27,148 @@ func newRBACProjectionStore(clusterLabelKey string) *rbacProjectionStore { } } -func registerRBACProjectionHandlers(store *rbacProjectionStore, roleInf, roleBindingInf, clusterRoleInf, clusterRoleBindingInf cache.SharedIndexInformer) error { - register := func(inf cache.SharedIndexInformer, upsert func(interface{}), del func(interface{})) error { - _, err := inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: upsert, - UpdateFunc: func(_, newObj interface{}) { 
upsert(newObj) }, - DeleteFunc: del, - }) - return err - } - if err := register(roleInf, store.upsertRole, store.deleteRole); err != nil { - return err - } - if err := register(roleBindingInf, store.upsertRoleBinding, store.deleteRoleBinding); err != nil { - return err - } - if err := register(clusterRoleInf, store.upsertClusterRole, store.deleteClusterRole); err != nil { - return err - } - if err := register(clusterRoleBindingInf, store.upsertClusterRoleBinding, store.deleteClusterRoleBinding); err != nil { - return err - } - return nil -} - -func (s *rbacProjectionStore) objectKey(obj runtime.Object) string { +func (s *rbacProjectionStore) objectKey(obj runtime.Object, clusterID string) string { acc, err := meta.Accessor(obj) if err != nil { return "" } - clusterID := acc.GetLabels()[s.clusterLabelKey] if clusterID == "" { return "" } return clusterID + "/" + acc.GetNamespace() + "/" + acc.GetName() } -func tombstoneObj(obj interface{}) interface{} { - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - return tombstone.Obj +// toVersionedRBAC converts an internal RBAC type to its versioned equivalent. +// The cacher stores internal types; MCI event handlers receive them wrapped +// in cachingObject envelopes. This function unwraps and converts. +func toVersionedRBAC(obj interface{}) interface{} { + // Unwrap cacher's cachingObject envelope if present. 
+ if co, ok := obj.(runtime.CacheableObject); ok { + obj = co.GetObject() } - return obj -} - -func (s *rbacProjectionStore) upsertClusterRole(obj interface{}) { - cr, ok := tombstoneObj(obj).(*rbacv1.ClusterRole) - if !ok || cr == nil { - return - } - key := s.objectKey(cr) - if key == "" { - return - } - s.mu.Lock() - defer s.mu.Unlock() - s.clusterRoles[key] = cr -} - -func (s *rbacProjectionStore) deleteClusterRole(obj interface{}) { - cr, ok := tombstoneObj(obj).(*rbacv1.ClusterRole) - if !ok || cr == nil { - return - } - key := s.objectKey(cr) - if key == "" { - return + // Fast path: already versioned + switch obj.(type) { + case *rbacv1.Role, *rbacv1.RoleBinding, *rbacv1.ClusterRole, *rbacv1.ClusterRoleBinding: + return obj } - s.mu.Lock() - defer s.mu.Unlock() - delete(s.clusterRoles, key) -} - -func (s *rbacProjectionStore) upsertClusterRoleBinding(obj interface{}) { - crb, ok := tombstoneObj(obj).(*rbacv1.ClusterRoleBinding) - if !ok || crb == nil { - return - } - key := s.objectKey(crb) - if key == "" { - return - } - s.mu.Lock() - defer s.mu.Unlock() - s.clusterBindings[key] = crb -} - -func (s *rbacProjectionStore) deleteClusterRoleBinding(obj interface{}) { - crb, ok := tombstoneObj(obj).(*rbacv1.ClusterRoleBinding) - if !ok || crb == nil { - return + rObj, ok := obj.(runtime.Object) + if !ok { + return obj } - key := s.objectKey(crb) - if key == "" { - return - } - s.mu.Lock() - defer s.mu.Unlock() - delete(s.clusterBindings, key) -} - -func (s *rbacProjectionStore) upsertRole(obj interface{}) { - r, ok := tombstoneObj(obj).(*rbacv1.Role) - if !ok || r == nil { - return - } - key := s.objectKey(r) - if key == "" { - return - } - s.mu.Lock() - defer s.mu.Unlock() - s.roles[key] = r -} - -func (s *rbacProjectionStore) deleteRole(obj interface{}) { - r, ok := tombstoneObj(obj).(*rbacv1.Role) - if !ok || r == nil { - return - } - key := s.objectKey(r) - if key == "" { - return + out, err := legacyscheme.Scheme.ConvertToVersion(rObj, 
rbacv1.SchemeGroupVersion) + if err != nil { + return obj } - s.mu.Lock() - defer s.mu.Unlock() - delete(s.roles, key) + return out } -func (s *rbacProjectionStore) upsertRoleBinding(obj interface{}) { - rb, ok := tombstoneObj(obj).(*rbacv1.RoleBinding) - if !ok || rb == nil { +// upsertWithCluster handles Add/Update events from MCI by type-switching on the +// RBAC object and inserting into the appropriate map. +func (s *rbacProjectionStore) upsertWithCluster(obj interface{}, clusterID string) { + if clusterID == "" { return } - key := s.objectKey(rb) - if key == "" { - return + obj = toVersionedRBAC(obj) + switch o := obj.(type) { + case *rbacv1.Role: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + s.roles[key] = o + s.mu.Unlock() + case *rbacv1.RoleBinding: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + s.roleBindings[key] = o + s.mu.Unlock() + case *rbacv1.ClusterRole: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + s.clusterRoles[key] = o + s.mu.Unlock() + case *rbacv1.ClusterRoleBinding: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + s.clusterBindings[key] = o + s.mu.Unlock() } - s.mu.Lock() - defer s.mu.Unlock() - s.roleBindings[key] = rb } -func (s *rbacProjectionStore) deleteRoleBinding(obj interface{}) { - rb, ok := tombstoneObj(obj).(*rbacv1.RoleBinding) - if !ok || rb == nil { +// deleteWithCluster handles Delete events from MCI. 
+func (s *rbacProjectionStore) deleteWithCluster(obj interface{}, clusterID string) { + if clusterID == "" { return } - key := s.objectKey(rb) - if key == "" { - return + obj = toVersionedRBAC(obj) + switch o := obj.(type) { + case *rbacv1.Role: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + delete(s.roles, key) + s.mu.Unlock() + case *rbacv1.RoleBinding: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + delete(s.roleBindings, key) + s.mu.Unlock() + case *rbacv1.ClusterRole: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + delete(s.clusterRoles, key) + s.mu.Unlock() + case *rbacv1.ClusterRoleBinding: + if o == nil { + return + } + key := s.objectKey(o, clusterID) + if key == "" { + return + } + s.mu.Lock() + delete(s.clusterBindings, key) + s.mu.Unlock() } - s.mu.Lock() - defer s.mu.Unlock() - delete(s.roleBindings, key) } func (s *rbacProjectionStore) listClusterRoles(clusterID string) []*rbacv1.ClusterRole { @@ -489,12 +203,6 @@ func (s *rbacProjectionStore) listClusterRoleBindings(clusterID string) []*rbacv return ret } -func (s *rbacProjectionStore) getClusterRoleBinding(clusterID, name string) *rbacv1.ClusterRoleBinding { - s.mu.RLock() - defer s.mu.RUnlock() - return s.clusterBindings[clusterID+"//"+name] -} - func (s *rbacProjectionStore) listRoles(clusterID string) []*rbacv1.Role { s.mu.RLock() defer s.mu.RUnlock() @@ -520,388 +228,3 @@ func (s *rbacProjectionStore) listRoleBindings(clusterID string) []*rbacv1.RoleB } return ret } - -// RBAC listers/informers -type scopedClusterRoleInformer struct { - informer cache.SharedIndexInformer - lister rbaclisters.ClusterRoleLister -} - -func (i *scopedClusterRoleInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedClusterRoleInformer) Lister() rbaclisters.ClusterRoleLister { return i.lister } - -type 
scopedClusterRoleBindingInformer struct { - informer cache.SharedIndexInformer - lister rbaclisters.ClusterRoleBindingLister -} - -func (i *scopedClusterRoleBindingInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedClusterRoleBindingInformer) Lister() rbaclisters.ClusterRoleBindingLister { - return i.lister -} - -type scopedRoleInformer struct { - informer cache.SharedIndexInformer - lister rbaclisters.RoleLister -} - -func (i *scopedRoleInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedRoleInformer) Lister() rbaclisters.RoleLister { return i.lister } - -type scopedRoleBindingInformer struct { - informer cache.SharedIndexInformer - lister rbaclisters.RoleBindingLister -} - -func (i *scopedRoleBindingInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedRoleBindingInformer) Lister() rbaclisters.RoleBindingLister { return i.lister } - -type scopedClusterRoleLister struct { - store *rbacProjectionStore - clusterID string -} - -func (l *scopedClusterRoleLister) List(sel labels.Selector) (ret []*rbacv1.ClusterRole, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, obj := range l.store.listClusterRoles(l.clusterID) { - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedClusterRoleLister) Get(name string) (*rbacv1.ClusterRole, error) { - if obj := l.store.getClusterRole(l.clusterID, name); obj != nil { - return obj, nil - } - return nil, fmt.Errorf("clusterrole %q not found", name) -} - -type scopedClusterRoleBindingLister struct { - store *rbacProjectionStore - clusterID string -} - -func (l *scopedClusterRoleBindingLister) List(sel labels.Selector) (ret []*rbacv1.ClusterRoleBinding, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, obj := range l.store.listClusterRoleBindings(l.clusterID) { - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - 
return ret, nil -} -func (l *scopedClusterRoleBindingLister) Get(name string) (*rbacv1.ClusterRoleBinding, error) { - if obj := l.store.getClusterRoleBinding(l.clusterID, name); obj != nil { - return obj, nil - } - return nil, fmt.Errorf("clusterrolebinding %q not found", name) -} - -type scopedRoleLister struct { - store *rbacProjectionStore - clusterID string -} - -func (l *scopedRoleLister) List(sel labels.Selector) (ret []*rbacv1.Role, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, obj := range l.store.listRoles(l.clusterID) { - if !sel.Matches(labels.Set(obj.Labels)) { - continue - } - ret = append(ret, obj) - } - return ret, nil -} -func (l *scopedRoleLister) Roles(ns string) rbaclisters.RoleNamespaceLister { - return &scopedRoleNamespaceLister{parent: l, namespace: ns} -} - -type scopedRoleNamespaceLister struct { - parent *scopedRoleLister - namespace string -} - -func (l *scopedRoleNamespaceLister) List(sel labels.Selector) (ret []*rbacv1.Role, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedRoleNamespaceLister) Get(name string) (*rbacv1.Role, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("role %s/%s not found", l.namespace, name) -} - -type scopedRoleBindingLister struct { - store *rbacProjectionStore - clusterID string -} - -func (l *scopedRoleBindingLister) List(sel labels.Selector) (ret []*rbacv1.RoleBinding, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, obj := range l.store.listRoleBindings(l.clusterID) { - if !sel.Matches(labels.Set(obj.Labels)) { - continue - } - ret = append(ret, obj) - } - return ret, nil -} -func (l *scopedRoleBindingLister) RoleBindings(ns string) rbaclisters.RoleBindingNamespaceLister { - return 
&scopedRoleBindingNamespaceLister{parent: l, namespace: ns} -} - -type scopedRoleBindingNamespaceLister struct { - parent *scopedRoleBindingLister - namespace string -} - -func (l *scopedRoleBindingNamespaceLister) List(sel labels.Selector) (ret []*rbacv1.RoleBinding, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedRoleBindingNamespaceLister) Get(name string) (*rbacv1.RoleBinding, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("rolebinding %s/%s not found", l.namespace, name) -} - -// Core listers/informers for authn -type scopedSecretInformer struct { - informer cache.SharedIndexInformer - lister corelisters.SecretLister -} - -func (i *scopedSecretInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedSecretInformer) Lister() corelisters.SecretLister { return i.lister } - -type scopedServiceAccountInformer struct { - informer cache.SharedIndexInformer - lister corelisters.ServiceAccountLister -} - -func (i *scopedServiceAccountInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedServiceAccountInformer) Lister() corelisters.ServiceAccountLister { return i.lister } - -type scopedPodInformer struct { - informer cache.SharedIndexInformer - lister corelisters.PodLister -} - -func (i *scopedPodInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedPodInformer) Lister() corelisters.PodLister { return i.lister } - -type scopedNodeInformer struct { - informer cache.SharedIndexInformer - lister corelisters.NodeLister -} - -func (i *scopedNodeInformer) Informer() cache.SharedIndexInformer { return i.informer } -func (i *scopedNodeInformer) Lister() corelisters.NodeLister { return i.lister } - -type 
scopedSecretLister struct { - indexer cache.Indexer - clusterID string - clusterLabelKey string -} - -func (l *scopedSecretLister) List(sel labels.Selector) (ret []*corev1.Secret, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Secret) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedSecretLister) Secrets(ns string) corelisters.SecretNamespaceLister { - return &scopedSecretNamespaceLister{parent: l, namespace: ns} -} - -type scopedSecretNamespaceLister struct { - parent *scopedSecretLister - namespace string -} - -func (l *scopedSecretNamespaceLister) List(sel labels.Selector) (ret []*corev1.Secret, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedSecretNamespaceLister) Get(name string) (*corev1.Secret, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("secret %s/%s not found", l.namespace, name) -} - -type scopedServiceAccountLister struct { - indexer cache.Indexer - clusterID string - clusterLabelKey string -} - -func (l *scopedServiceAccountLister) List(sel labels.Selector) (ret []*corev1.ServiceAccount, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.ServiceAccount) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedServiceAccountLister) ServiceAccounts(ns string) corelisters.ServiceAccountNamespaceLister { - return &scopedServiceAccountNamespaceLister{parent: l, namespace: ns} -} - -type 
scopedServiceAccountNamespaceLister struct { - parent *scopedServiceAccountLister - namespace string -} - -func (l *scopedServiceAccountNamespaceLister) List(sel labels.Selector) (ret []*corev1.ServiceAccount, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedServiceAccountNamespaceLister) Get(name string) (*corev1.ServiceAccount, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("serviceaccount %s/%s not found", l.namespace, name) -} - -type scopedPodLister struct { - indexer cache.Indexer - clusterID string - clusterLabelKey string -} - -func (l *scopedPodLister) List(sel labels.Selector) (ret []*corev1.Pod, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Pod) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedPodLister) Pods(ns string) corelisters.PodNamespaceLister { - return &scopedPodNamespaceLister{parent: l, namespace: ns} -} - -type scopedPodNamespaceLister struct { - parent *scopedPodLister - namespace string -} - -func (l *scopedPodNamespaceLister) List(sel labels.Selector) (ret []*corev1.Pod, err error) { - all, _ := l.parent.List(sel) - for _, obj := range all { - if obj.Namespace == l.namespace { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedPodNamespaceLister) Get(name string) (*corev1.Pod, error) { - all, _ := l.parent.List(labels.Everything()) - for _, obj := range all { - if obj.Namespace == l.namespace && obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("pod %s/%s not found", l.namespace, name) -} - -type scopedNodeLister struct { - indexer 
cache.Indexer - clusterID string - clusterLabelKey string -} - -func (l *scopedNodeLister) List(sel labels.Selector) (ret []*corev1.Node, err error) { - if sel == nil { - sel = labels.Everything() - } - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Node) - if !ok { - continue - } - if sel.Matches(labels.Set(obj.Labels)) { - ret = append(ret, obj) - } - } - return ret, nil -} -func (l *scopedNodeLister) Get(name string) (*corev1.Node, error) { - for _, it := range filteredByCluster(l.indexer, l.clusterID) { - obj, ok := it.(*corev1.Node) - if !ok { - continue - } - if obj.Name == name { - return obj, nil - } - } - return nil, fmt.Errorf("node %q not found", name) -} diff --git a/pkg/multicluster/bootstrap/crd_controller.go b/pkg/multicluster/bootstrap/crd_controller.go index 2247812..3632452 100644 --- a/pkg/multicluster/bootstrap/crd_controller.go +++ b/pkg/multicluster/bootstrap/crd_controller.go @@ -41,10 +41,13 @@ func (c *MulticlusterCRDController) Start(stopCh <-chan struct{}) { c.mu.Unlock() go c.run() + if c.defaultCluster != "" { + c.EnsureCluster(c.defaultCluster) + } } func (c *MulticlusterCRDController) EnsureCluster(clusterID string) { - if c == nil || clusterID == "" || clusterID == c.defaultCluster { + if c == nil || clusterID == "" { return } diff --git a/pkg/multicluster/bootstrap/crd_runtime_manager_wrapped.go b/pkg/multicluster/bootstrap/crd_runtime_manager_wrapped.go index 2e20978..fdacd97 100644 --- a/pkg/multicluster/bootstrap/crd_runtime_manager_wrapped.go +++ b/pkg/multicluster/bootstrap/crd_runtime_manager_wrapped.go @@ -13,15 +13,17 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" apierrors 
"k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" @@ -29,6 +31,8 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" mc "github.com/kplane-dev/apiserver/pkg/multicluster" + mcinformer "github.com/kplane-dev/informer" + mcstorage "github.com/kplane-dev/storage" ) var ( @@ -41,12 +45,12 @@ var ( ) type CRDRuntimeManagerOptions struct { - BaseAPIExtensionsConfig *apiextensionsapiserver.Config - APIExtensionsInformerPool *mc.APIExtensionsInformerPool - PathPrefix string - ControlPlaneSegment string - DefaultCluster string - Delegate http.Handler + BaseAPIExtensionsConfig *apiextensionsapiserver.Config + InformerRegistry *mc.InformerRegistry + PathPrefix string + ControlPlaneSegment string + DefaultCluster string + Delegate http.Handler } type runtimeEntry struct { @@ -68,13 +72,11 @@ type CRDRuntimeManager struct { sharedCRDSF singleflight.Group // Informer-backed serves index state. 
- servesIndex *CRDServesIndex - sharedProjection *crdProjectionStore - sharedStarted bool - sharedFactory apiextensionsinformers.SharedInformerFactory - sharedStopCh <-chan struct{} - sharedOwnedStop chan struct{} - crdQueue workqueue.TypedRateLimitingInterface[string] + servesIndex *CRDServesIndex + sharedProjection *crdProjectionStore + sharedStarted bool + crdMCI *mcinformer.MultiClusterInformer + crdQueue workqueue.TypedRateLimitingInterface[string] crdWorkersStarted bool } @@ -94,6 +96,7 @@ func NewCRDRuntimeManager(opts CRDRuntimeManagerOptions) *CRDRuntimeManager { } } +// Runtime returns the CRD API handler for the given non-default cluster; it returns nil for the default cluster or when no base apiextensions config is set. func (m *CRDRuntimeManager) Runtime(clusterID string, stopCh <-chan struct{}) (http.Handler, error) { if m == nil || clusterID == "" || clusterID == m.opts.DefaultCluster || m.opts.BaseAPIExtensionsConfig == nil { return nil, nil @@ -131,7 +134,22 @@ func (m *CRDRuntimeManager) ServesGroupVersion(clusterID, group, version string, crdServesLookupLat.WithLabelValues(r).Observe(time.Since(start).Seconds()) return served, nil } - // No fallback direct API lookup: shared projection is the source of truth. + // Reconcile from shared projection, then briefly wait for projection updates + // to absorb fresh CRD install/update events before returning not served. 
+ if served, ok := m.lookupFromSharedProjection(clusterID, group, version); ok { + r := result(served) + crdServesCacheMiss.WithLabelValues("projection").Inc() + crdServesLookupTotal.WithLabelValues(r).Inc() + crdServesLookupLat.WithLabelValues(r).Observe(time.Since(start).Seconds()) + return served, nil + } + if served, ok := m.waitForSharedProjection(clusterID, group, version, 2*time.Second); ok { + r := result(served) + crdServesCacheMiss.WithLabelValues("projection-wait").Inc() + crdServesLookupTotal.WithLabelValues(r).Inc() + crdServesLookupLat.WithLabelValues(r).Observe(time.Since(start).Seconds()) + return served, nil + } r := result(false) crdServesLookupTotal.WithLabelValues(r).Inc() crdServesLookupLat.WithLabelValues(r).Observe(time.Since(start).Seconds()) @@ -180,7 +198,7 @@ func (m *CRDRuntimeManager) ensureSharedRuntime(stopCh <-chan struct{}) (runtime go crdServer.GenericAPIServer.RunPostStartHooks(runCtx) entry := runtimeEntry{ - handler: crdServer.GenericAPIServer.Handler.NonGoRestfulMux, + handler: crdServer.GenericAPIServer.Handler.Director, server: crdServer.GenericAPIServer, cancel: cancel, } @@ -261,6 +279,52 @@ func (m *CRDRuntimeManager) lookupFromInformerIndex(clusterID, group, version st return m.servesIndex.Lookup(clusterID, group, version) } +func (m *CRDRuntimeManager) lookupFromSharedProjection(clusterID, group, version string) (bool, bool) { + crds := m.sharedProjection.List(clusterID) + objs := make([]interface{}, 0, len(crds)) + served := false + for _, crd := range crds { + if crd == nil { + continue + } + objs = append(objs, crd) + if !isCRDEstablished(crd) || crd.Spec.Group != group { + continue + } + for _, v := range crd.Spec.Versions { + if v.Served && v.Name == version { + served = true + break + } + } + } + m.rebuildClusterIndex(clusterID, objs) + return served, true +} + +func (m *CRDRuntimeManager) waitForSharedProjection(clusterID, group, version string, timeout time.Duration) (bool, bool) { + if timeout <= 0 { + timeout = 
2 * time.Second + } + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if served, ok := m.lookupFromInformerIndex(clusterID, group, version); ok { + return served, true + } + if served, ok := m.lookupFromSharedProjection(clusterID, group, version); ok && served { + return true, true + } + time.Sleep(25 * time.Millisecond) + } + if served, ok := m.lookupFromInformerIndex(clusterID, group, version); ok { + return served, true + } + if served, ok := m.lookupFromSharedProjection(clusterID, group, version); ok { + return served, true + } + return false, false +} + func (m *CRDRuntimeManager) ensureSharedCRDState(stopCh <-chan struct{}) error { m.mu.Lock() if m.sharedStarted { @@ -277,46 +341,64 @@ func (m *CRDRuntimeManager) ensureSharedCRDState(stopCh <-chan struct{}) error { } m.mu.Unlock() - base := m.baseLoopbackConfig() - if base == nil { - return nil, fmt.Errorf("base apiextensions loopback config is required for shared CRD informer") + if m.opts.InformerRegistry == nil { + return nil, fmt.Errorf("InformerRegistry is required for shared CRD state") } - cs, err := allClustersAPIExtensionsClient(base) + + mci, err := m.opts.InformerRegistry.Get(schema.GroupResource{ + Group: "apiextensions.k8s.io", + Resource: "customresourcedefinitions", + }) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get MCI for CRDs: %w", err) } - factory := apiextensionsinformers.NewSharedInformerFactory(cs, 0) - crdInformer := factory.Apiextensions().V1().CustomResourceDefinitions().Informer() - if err := crdInformer.SetTransform(transformCRDForShared(mc.DefaultClusterAnnotation)); err != nil { - return nil, err - } - _, err = crdInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - m.onSharedCRDUpsert(obj) + mci.AddEventHandler(mcinformer.MultiClusterEventHandlerFuncs{ + AddFunc: func(obj *mcstorage.ObjectWithClusterIdentity, _ bool) { + m.onMCICRDUpsert(obj) }, - UpdateFunc: func(_, newObj 
interface{}) { - m.onSharedCRDUpsert(newObj) + UpdateFunc: func(_, newObj *mcstorage.ObjectWithClusterIdentity) { + m.onMCICRDUpsert(newObj) }, - DeleteFunc: func(obj interface{}) { - m.onSharedCRDDelete(obj) + DeleteFunc: func(obj *mcstorage.ObjectWithClusterIdentity) { + m.onMCICRDDelete(obj) }, }) - if err != nil { - return nil, err + + ctx := context.Background() + if stopCh != nil { + ctx = wait.ContextForChannel(stopCh) + } + for !mci.HasSynced() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + time.Sleep(10 * time.Millisecond) + } } - startStop := m.sharedStartStopCh(stopCh) - factory.Start(startStop) - if !cache.WaitForCacheSync(startStop, crdInformer.HasSynced) { - return nil, fmt.Errorf("failed waiting for shared CRD informer sync") + // Backfill existing objects (late handler registration misses existing items). + for _, clusterID := range mci.Clusters() { + objs := mci.List(clusterID) + for _, obj := range objs { + crd, ok := toVersionedCRD(obj) + if !ok { + continue + } + m.sharedProjection.Upsert(clusterID, crd) + m.servesIndex.UpsertCRD(clusterID, crd) + } } - m.startSharedCRDWorkers(startStop) - m.rebuildSharedProjection(crdInformer.GetStore().List()) + workerStopCh := stopCh + if workerStopCh == nil { + workerStopCh = make(chan struct{}) + } + m.startSharedCRDWorkers(workerStopCh) m.mu.Lock() - m.sharedFactory = factory + m.crdMCI = mci m.sharedStarted = true m.mu.Unlock() return nil, nil @@ -328,63 +410,71 @@ func (m *CRDRuntimeManager) rebuildClusterIndex(clusterID string, objs []interfa m.servesIndex.RebuildCluster(clusterID, objs) } -func (m *CRDRuntimeManager) rebuildSharedProjection(objs []interface{}) { - decodedObjs := make([]interface{}, 0, len(objs)) - byCluster := map[string][]interface{}{} - for _, obj := range objs { - crd, ok := crdFromObj(obj) - if !ok { - continue - } - clusterID := objectClusterID(crd, mc.DefaultClusterAnnotation) - if clusterID == "" { - continue - } - decoded := decodeSharedCRD(clusterID, 
crd) - decodedObjs = append(decodedObjs, decoded) - byCluster[clusterID] = append(byCluster[clusterID], decoded) +// toVersionedCRD converts an internal CRD type to its versioned equivalent. +// The cacher stores internal types; MCI event handlers receive them wrapped +// in cachingObject envelopes. This function unwraps and converts using the +// apiextensions scheme (legacyscheme doesn't register apiextensions types). +func toVersionedCRD(obj interface{}) (*apiextensionsv1.CustomResourceDefinition, bool) { + // Unwrap cacher's cachingObject envelope if present. + if co, ok := obj.(runtime.CacheableObject); ok { + obj = co.GetObject() + } + // Fast path: already versioned + if crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition); ok { + return crd, true } - m.sharedProjection.ReplaceAll(decodedObjs, mc.DefaultClusterAnnotation) - for clusterID, clusterObjs := range byCluster { - m.servesIndex.RebuildCluster(clusterID, clusterObjs) + rObj, ok := obj.(runtime.Object) + if !ok { + return nil, false } + out := &apiextensionsv1.CustomResourceDefinition{} + if err := apiextensionsapiserver.Scheme.Convert(rObj, out, nil); err != nil { + return nil, false + } + return out, true } -func (m *CRDRuntimeManager) onSharedCRDUpsert(obj interface{}) { - crd, ok := crdFromObj(obj) - if !ok { - return +func (m *CRDRuntimeManager) onMCICRDUpsert(obj *mcstorage.ObjectWithClusterIdentity) { + clusterID := obj.ClusterID + if clusterID == "" { + clusterID = m.opts.DefaultCluster } - clusterID := objectClusterID(crd, mc.DefaultClusterAnnotation) if clusterID == "" { return } - crd = decodeSharedCRD(clusterID, crd) + crd, ok := toVersionedCRD(obj.Object) + if !ok { + return + } m.sharedProjection.Upsert(clusterID, crd) m.servesIndex.UpsertCRD(clusterID, crd) m.enqueueCRDStatus(clusterID, crd.Name) } -func (m *CRDRuntimeManager) onSharedCRDDelete(obj interface{}) { - crd, ok := crdFromObj(obj) - if !ok { - return +func (m *CRDRuntimeManager) onMCICRDDelete(obj 
*mcstorage.ObjectWithClusterIdentity) { + clusterID := obj.ClusterID + if clusterID == "" { + clusterID = m.opts.DefaultCluster } - clusterID := objectClusterID(crd, mc.DefaultClusterAnnotation) if clusterID == "" { return } - crd = decodeSharedCRD(clusterID, crd) + crd, ok := toVersionedCRD(obj.Object) + if !ok { + return + } m.sharedProjection.Delete(clusterID, crd.Name) m.servesIndex.DeleteCRD(clusterID, crd) } func (m *CRDRuntimeManager) EnsureCluster(clusterID string, stopCh <-chan struct{}) error { - if m == nil || clusterID == "" || clusterID == m.opts.DefaultCluster { + if m == nil || clusterID == "" { return nil } - if _, err := m.ensureSharedRuntime(stopCh); err != nil { - return err + if clusterID != m.opts.DefaultCluster { + if _, err := m.ensureSharedRuntime(stopCh); err != nil { + return err + } } if err := m.ensureSharedCRDState(stopCh); err != nil { return err @@ -437,21 +527,6 @@ func ServedKeysForCRD(clusterID string, crd *apiextensionsv1.CustomResourceDefin return keys } -func crdFromObj(obj interface{}) (*apiextensionsv1.CustomResourceDefinition, bool) { - if obj == nil { - return nil, false - } - if crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition); ok { - return crd, true - } - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - if crd, ok := tombstone.Obj.(*apiextensionsv1.CustomResourceDefinition); ok { - return crd, true - } - } - return nil, false -} - func isCRDEstablished(crd *apiextensionsv1.CustomResourceDefinition) bool { for _, c := range crd.Status.Conditions { if c.Type == apiextensionsv1.Established && c.Status == apiextensionsv1.ConditionTrue { @@ -546,32 +621,6 @@ func (s *crdProjectionStore) List(clusterID string) []*apiextensionsv1.CustomRes return out } -func (s *crdProjectionStore) ReplaceAll(objs []interface{}, clusterLabelKey string) { - if s == nil { - return - } - next := map[string]map[string]*apiextensionsv1.CustomResourceDefinition{} - for _, obj := range objs { - crd, ok := crdFromObj(obj) - if 
!ok { - continue - } - clusterID := objectClusterID(crd, clusterLabelKey) - if clusterID == "" { - continue - } - clusterMap, ok := next[clusterID] - if !ok { - clusterMap = map[string]*apiextensionsv1.CustomResourceDefinition{} - next[clusterID] = clusterMap - } - clusterMap[crd.Name] = crd.DeepCopy() - } - s.mu.Lock() - s.byCluster = next - s.mu.Unlock() -} - func (m *CRDRuntimeManager) baseLoopbackConfig() *rest.Config { if m == nil || m.opts.BaseAPIExtensionsConfig == nil || m.opts.BaseAPIExtensionsConfig.GenericConfig == nil { return nil @@ -579,14 +628,6 @@ func (m *CRDRuntimeManager) baseLoopbackConfig() *rest.Config { return m.opts.BaseAPIExtensionsConfig.GenericConfig.LoopbackClientConfig } -func allClustersAPIExtensionsClient(base *rest.Config) (apiextensionsclient.Interface, error) { - if base == nil { - return nil, fmt.Errorf("base loopback config is required") - } - cfg := allClustersLoopbackConfig(base) - return apiextensionsclient.NewForConfig(cfg) -} - func allClustersLoopbackConfig(base *rest.Config) *rest.Config { cfg := rest.CopyConfig(base) cfg.Impersonate.UserName = mc.DefaultInternalCrossClusterUser @@ -603,83 +644,20 @@ func allClustersLoopbackConfig(base *rest.Config) *rest.Config { return cfg } -func (m *CRDRuntimeManager) sharedStartStopCh(stopCh <-chan struct{}) <-chan struct{} { - m.mu.Lock() - defer m.mu.Unlock() - if m.sharedStopCh != nil { - return m.sharedStopCh - } - if stopCh != nil { - m.sharedStopCh = stopCh - return m.sharedStopCh - } - m.sharedOwnedStop = make(chan struct{}) - m.sharedStopCh = m.sharedOwnedStop - return m.sharedStopCh -} - -func objectClusterID(obj interface{}, clusterLabelKey string) string { - if clusterLabelKey == "" { - clusterLabelKey = mc.DefaultClusterAnnotation - } - accessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - return accessor.GetLabels()[clusterLabelKey] -} - const sharedCRDNamePrefix = "__mc_shared_crd__" -func transformCRDForShared(clusterLabelKey string) 
cache.TransformFunc { - return func(obj interface{}) (interface{}, error) { - crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition) - if !ok || crd == nil { - return obj, nil - } - clusterID := objectClusterID(crd, clusterLabelKey) - if clusterID == "" { - return obj, nil - } - cp := crd.DeepCopy() - cp.Name = encodeSharedCRDName(clusterID, cp.Name) - return cp, nil - } -} - -func encodeSharedCRDName(clusterID, name string) string { - if clusterID == "" || name == "" { - return name - } - prefix := sharedCRDNamePrefix + clusterID + "__" - if strings.HasPrefix(name, prefix) { - return name - } - return prefix + name -} - // EncodeSharedCRDResourceName returns a cluster-unique resource token that // preserves the "." CRD name relation used by // upstream CRD handler lookup. func EncodeSharedCRDResourceName(clusterID, resource string) string { - return encodeSharedCRDName(clusterID, resource) -} - -func decodeSharedCRDName(clusterID, name string) string { - if clusterID == "" || name == "" { - return name + if clusterID == "" || resource == "" { + return resource } prefix := sharedCRDNamePrefix + clusterID + "__" - return strings.TrimPrefix(name, prefix) -} - -func decodeSharedCRD(clusterID string, crd *apiextensionsv1.CustomResourceDefinition) *apiextensionsv1.CustomResourceDefinition { - if crd == nil { - return nil + if strings.HasPrefix(resource, prefix) { + return resource } - cp := crd.DeepCopy() - cp.Name = decodeSharedCRDName(clusterID, cp.Name) - return cp + return prefix + resource } const sharedCRDWorkerCount = 6 @@ -737,11 +715,30 @@ func (m *CRDRuntimeManager) reconcileCRDStatusKey(key string) error { if !ok { return nil } - crd, found := m.sharedProjection.Get(clusterID, name) - if !found || crd == nil { + _, found := m.sharedProjection.Get(clusterID, name) + if !found { + return nil + } + cs, err := m.ensureClusterClient(clusterID) + if err != nil { + return err + } + live, err := cs.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), 
name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + if live.DeletionTimestamp != nil && hasCRDCleanupFinalizer(live) { + return m.reconcileDeletingCRD(clusterID, live) + } + // Let upstream finalizer/status controllers own terminating CRDs. + // Overwriting status during deletion can stall customresourcecleanup. + if live.DeletionTimestamp != nil { return nil } - desired := crd.DeepCopy() + desired := live.DeepCopy() desired.Status.AcceptedNames = desired.Spec.Names namesAccepted := apiextensionsv1.CustomResourceDefinitionCondition{ @@ -768,20 +765,172 @@ func (m *CRDRuntimeManager) reconcileCRDStatusKey(key string) error { } apiextensionshelpers.SetCRDCondition(desired, established) - if equality.Semantic.DeepEqual(crd.Status, desired.Status) { + if equality.Semantic.DeepEqual(live.Status, desired.Status) { return nil } + _, err = cs.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), desired, metav1.UpdateOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + if apierrors.IsConflict(err) { + return err + } + return err +} +func hasCRDCleanupFinalizer(crd *apiextensionsv1.CustomResourceDefinition) bool { + for _, f := range crd.Finalizers { + if f == apiextensionsv1.CustomResourceCleanupFinalizer { + return true + } + } + return false +} + +func (m *CRDRuntimeManager) reconcileDeletingCRD(clusterID string, crd *apiextensionsv1.CustomResourceDefinition) error { cs, err := m.ensureClusterClient(clusterID) if err != nil { return err } - _, err = cs.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), desired, metav1.UpdateOptions{}) + + live, err := cs.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil } - if apierrors.IsConflict(err) { + if err != nil { return err } - return err + if live.DeletionTimestamp == nil || !hasCRDCleanupFinalizer(live) { + return nil + } + 
+ inProgress := live.DeepCopy() + apiextensionshelpers.SetCRDCondition(inProgress, apiextensionsv1.CustomResourceDefinitionCondition{ + Type: apiextensionsv1.Terminating, + Status: apiextensionsv1.ConditionTrue, + Reason: "InstanceDeletionInProgress", + Message: "CustomResource deletion is in progress", + }) + if _, err := cs.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), inProgress, metav1.UpdateOptions{}); err != nil { + if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { + return nil + } + return err + } + + cond, delErr := m.deleteCRInstances(clusterID, live) + + live, err = cs.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + next := live.DeepCopy() + apiextensionshelpers.SetCRDCondition(next, cond) + if delErr != nil { + if _, err := cs.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), next, metav1.UpdateOptions{}); err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { + klog.Errorf("mc.crd status update failed while deleting instances cluster=%s crd=%s err=%v", clusterID, crd.Name, err) + } + return delErr + } + + apiextensionshelpers.CRDRemoveFinalizer(next, apiextensionsv1.CustomResourceCleanupFinalizer) + if _, err := cs.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), next, metav1.UpdateOptions{}); err != nil { + if apierrors.IsNotFound(err) || apierrors.IsConflict(err) { + return nil + } + return err + } + return nil } + +func (m *CRDRuntimeManager) deleteCRInstances(clusterID string, crd *apiextensionsv1.CustomResourceDefinition) (apiextensionsv1.CustomResourceDefinitionCondition, error) { + dc, err := m.dynamicClientForCluster(clusterID) + if err != nil { + return deletingCond("InstanceDeletionFailed", fmt.Sprintf("could not create dynamic client: %v", err)), err + } + + storageVersion, err := 
apiextensionshelpers.GetCRDStorageVersion(crd) + if err != nil { + return deletingCond("InstanceDeletionFailed", fmt.Sprintf("could not resolve storage version: %v", err)), err + } + resource := crd.Status.AcceptedNames.Plural + if resource == "" { + resource = crd.Spec.Names.Plural + } + gvr := schema.GroupVersionResource{ + Group: crd.Spec.Group, + Version: storageVersion, + Resource: resource, + } + rc := dc.Resource(gvr) + + ctx := context.TODO() + list, err := rc.List(ctx, metav1.ListOptions{}) + if err != nil { + return deletingCond("InstanceDeletionFailed", fmt.Sprintf("could not list instances: %v", err)), err + } + + if crd.Spec.Scope == apiextensionsv1.NamespaceScoped { + seen := sets.New[string]() + for _, item := range list.Items { + ns := item.GetNamespace() + if ns == "" || seen.Has(ns) { + continue + } + seen.Insert(ns) + if err := rc.Namespace(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil && !apierrors.IsNotFound(err) { + return deletingCond("InstanceDeletionFailed", fmt.Sprintf("could not issue all deletes: %v", err)), err + } + } + } else { + if err := rc.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil && !apierrors.IsNotFound(err) { + return deletingCond("InstanceDeletionFailed", fmt.Sprintf("could not issue all deletes: %v", err)), err + } + } + + err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, true, func(ctx context.Context) (bool, error) { + l, err := rc.List(ctx, metav1.ListOptions{}) + if err != nil { + return false, err + } + return len(l.Items) == 0, nil + }) + if err != nil { + return deletingCond("InstanceDeletionCheck", fmt.Sprintf("could not confirm zero CustomResources remaining: %v", err)), err + } + return deletingCond("InstanceDeletionCompleted", "removed all instances"), nil +} + +func deletingCond(reason, message string) apiextensionsv1.CustomResourceDefinitionCondition { + status := apiextensionsv1.ConditionTrue + if reason == 
"InstanceDeletionCompleted" { + status = apiextensionsv1.ConditionFalse + } + return apiextensionsv1.CustomResourceDefinitionCondition{ + Type: apiextensionsv1.Terminating, + Status: status, + Reason: reason, + Message: message, + } +} + +func (m *CRDRuntimeManager) dynamicClientForCluster(clusterID string) (dynamic.Interface, error) { + base := m.baseLoopbackConfig() + if base == nil { + return nil, fmt.Errorf("base apiextensions loopback config is required") + } + cfg := rest.CopyConfig(base) + host, err := mc.ClusterHost(cfg.Host, mc.Options{ + PathPrefix: m.opts.PathPrefix, + ControlPlaneSegment: m.opts.ControlPlaneSegment, + }, clusterID) + if err != nil { + return nil, err + } + cfg.Host = host + return dynamic.NewForConfig(cfg) +} + diff --git a/pkg/multicluster/bootstrap/crd_serves_index.go b/pkg/multicluster/bootstrap/crd_serves_index.go index 6b27a9f..e613011 100644 --- a/pkg/multicluster/bootstrap/crd_serves_index.go +++ b/pkg/multicluster/bootstrap/crd_serves_index.go @@ -49,8 +49,8 @@ func (i *CRDServesIndex) RebuildCluster(clusterID string, objs []interface{}) { i.crdKeys[clusterID] = map[string][]string{} for _, obj := range objs { - crd, ok := crdFromObj(obj) - if !ok { + crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition) + if !ok || crd == nil { continue } keys := ServedKeysForCRD(clusterID, crd) diff --git a/pkg/multicluster/bootstrap/kubernetesservice_controller.go b/pkg/multicluster/bootstrap/kubernetesservice_controller.go index b34f261..2e679aa 100644 --- a/pkg/multicluster/bootstrap/kubernetesservice_controller.go +++ b/pkg/multicluster/bootstrap/kubernetesservice_controller.go @@ -138,6 +138,11 @@ func (c *kubernetesServiceController) reconcile() error { } epClient := c.client.CoreV1().Endpoints(metav1.NamespaceDefault) + // Endpoint objects reject loopback addresses. In local envtest setups the + // loopback listener is expected; skip endpoint reconciliation in that case. 
+ if c.publicIP != nil && c.publicIP.IsLoopback() { + return nil + } want := &corev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Name: "kubernetes", diff --git a/pkg/multicluster/clientpool.go b/pkg/multicluster/clientpool.go index dce85cc..fb9083b 100644 --- a/pkg/multicluster/clientpool.go +++ b/pkg/multicluster/clientpool.go @@ -9,6 +9,10 @@ import ( "k8s.io/client-go/rest" ) +// ErrMissingClientFactory is returned when a manager requires a client pool +// but none was configured. +var ErrMissingClientFactory = fmt.Errorf("missing client factory for cluster operations") + // ClientPool caches per-cluster REST clients/transports to avoid rebuilding // client-go machinery for every request. type ClientPool struct { diff --git a/pkg/multicluster/informer_registry.go b/pkg/multicluster/informer_registry.go new file mode 100644 index 0000000..f27f2d6 --- /dev/null +++ b/pkg/multicluster/informer_registry.go @@ -0,0 +1,83 @@ +package multicluster + +import ( + "context" + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + + "github.com/kplane-dev/informer" +) + +// InformerRegistry bridges storage creation (lazy, per-resource) and informer +// consumption (by managers). It creates MultiClusterInformer instances on demand, +// backed by the raw cacher for each resource type. +type InformerRegistry struct { + ctx context.Context + mu sync.Mutex + storages map[schema.GroupResource]*clusteredStorage + informers map[schema.GroupResource]*informer.MultiClusterInformer +} + +// NewInformerRegistry creates a new InformerRegistry. The context controls the +// lifetime of all MultiClusterInformers created by this registry. 
+func NewInformerRegistry(ctx context.Context) *InformerRegistry { + return &InformerRegistry{ + ctx: ctx, + storages: make(map[schema.GroupResource]*clusteredStorage), + informers: make(map[schema.GroupResource]*informer.MultiClusterInformer), + } +} + +// RegisterStorage is called by newClusteredStorage() to register the storage +// for a resource type. The actual cacher is created lazily via ensureStore(). +func (r *InformerRegistry) RegisterStorage(gr schema.GroupResource, cs *clusteredStorage) { + r.mu.Lock() + defer r.mu.Unlock() + r.storages[gr] = cs + klog.V(2).Infof("mc.informerRegistry registered storage for %s", gr) +} + +// Get returns (or creates) the MultiClusterInformer for a resource. +// Forces cacher creation via ensureStore() if needed. +func (r *InformerRegistry) Get(gr schema.GroupResource) (*informer.MultiClusterInformer, error) { + r.mu.Lock() + if mci, ok := r.informers[gr]; ok { + r.mu.Unlock() + return mci, nil + } + cs, ok := r.storages[gr] + r.mu.Unlock() + + if !ok { + return nil, fmt.Errorf("no registered storage for %s", gr) + } + + // ensureStore() acquires its own lock; call outside our lock to avoid nesting. + store, err := cs.ensureStore() + if err != nil { + return nil, fmt.Errorf("failed to ensure store for %s: %w", gr, err) + } + + r.mu.Lock() + defer r.mu.Unlock() + + // Double-check after re-acquiring lock. 
+ if mci, ok := r.informers[gr]; ok { + return mci, nil + } + + mci := informer.New(informer.Config{ + Storage: store, + ResourcePrefix: cs.kindRootPrefix(), + GroupResource: gr, + }) + + go mci.Run(r.ctx) + + r.informers[gr] = mci + klog.V(2).Infof("mc.informerRegistry created MultiClusterInformer for %s prefix=%s", gr, cs.kindRootPrefix()) + return mci, nil +} diff --git a/pkg/multicluster/informerpool.go b/pkg/multicluster/informerpool.go deleted file mode 100644 index d542927..0000000 --- a/pkg/multicluster/informerpool.go +++ /dev/null @@ -1,111 +0,0 @@ -package multicluster - -import ( - "fmt" - "sync" - "time" - - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" -) - -type InformerPoolOptions struct { - ClientForCluster func(clusterID string) (kubernetes.Interface, error) - ResyncPeriod time.Duration - StopCh <-chan struct{} - StartOnGet bool -} - -type InformerPool struct { - opts InformerPoolOptions - - mu sync.Mutex - clusters map[string]*informerEntry -} - -type informerEntry struct { - clientset kubernetes.Interface - factory informers.SharedInformerFactory - stopCh <-chan struct{} - ownedCh chan struct{} -} - -func NewInformerPool(opts InformerPoolOptions) *InformerPool { - if opts.StartOnGet == false { - // keep explicit - } else { - opts.StartOnGet = true - } - return &InformerPool{ - opts: opts, - clusters: map[string]*informerEntry{}, - } -} - -func NewInformerPoolFromClientPool(pool *ClientPool, resync time.Duration, stopCh <-chan struct{}) *InformerPool { - return NewInformerPool(InformerPoolOptions{ - ClientForCluster: pool.KubeClientForCluster, - ResyncPeriod: resync, - StopCh: stopCh, - StartOnGet: true, - }) -} - -func (p *InformerPool) Get(clusterID string) (kubernetes.Interface, informers.SharedInformerFactory, <-chan struct{}, error) { - p.mu.Lock() - defer p.mu.Unlock() - - if entry, ok := p.clusters[clusterID]; ok { - if p.opts.StartOnGet { - entry.start() - } - return entry.clientset, entry.factory, entry.stopCh, nil - } - 
if p.opts.ClientForCluster == nil { - return nil, nil, nil, ErrMissingClientFactory - } - cs, err := p.opts.ClientForCluster(clusterID) - if err != nil { - return nil, nil, nil, err - } - factory := informers.NewSharedInformerFactory(cs, p.opts.ResyncPeriod) - stopCh := p.opts.StopCh - var ownedCh chan struct{} - if stopCh == nil { - ownedCh = make(chan struct{}) - stopCh = ownedCh - } - entry := &informerEntry{ - clientset: cs, - factory: factory, - stopCh: stopCh, - ownedCh: ownedCh, - } - p.clusters[clusterID] = entry - if p.opts.StartOnGet { - entry.start() - } - return entry.clientset, entry.factory, entry.stopCh, nil -} - -func (e *informerEntry) start() { - // SharedInformerFactory.Start is idempotent and can be called repeatedly. - // Calling it on every Get ensures informers that are registered after the - // first Start call are also started. - e.factory.Start(e.stopCh) -} - -func (p *InformerPool) StopCluster(clusterID string) { - p.mu.Lock() - defer p.mu.Unlock() - entry, ok := p.clusters[clusterID] - if !ok { - return - } - if entry.ownedCh != nil { - close(entry.ownedCh) - } - delete(p.clusters, clusterID) -} - -var ErrMissingClientFactory = fmt.Errorf("missing client factory for informer pool") diff --git a/pkg/multicluster/informerpool_test.go b/pkg/multicluster/informerpool_test.go deleted file mode 100644 index d2b8b99..0000000 --- a/pkg/multicluster/informerpool_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package multicluster - -import ( - "testing" - - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" -) - -func TestInformerPool_ReusesFactoryPerCluster(t *testing.T) { - pool := NewInformerPool(InformerPoolOptions{ - ClientForCluster: func(clusterID string) (kubernetes.Interface, error) { - return fake.NewSimpleClientset(), nil - }, - StartOnGet: false, - }) - - _, f1, _, err := pool.Get("c1") - if err != nil { - t.Fatalf("Get c1: %v", err) - } - _, f1b, _, err := pool.Get("c1") - if err != nil { - 
t.Fatalf("Get c1 again: %v", err) - } - if f1 != f1b { - t.Fatalf("expected same informer factory for same cluster") - } - - _, f2, _, err := pool.Get("c2") - if err != nil { - t.Fatalf("Get c2: %v", err) - } - if f1 == f2 { - t.Fatalf("expected different informer factories for different clusters") - } - - if _, ok := f1.(informers.SharedInformerFactory); !ok { - t.Fatalf("expected shared informer factory type") - } -} diff --git a/pkg/multicluster/options.go b/pkg/multicluster/options.go index d0f017b..a69259f 100644 --- a/pkg/multicluster/options.go +++ b/pkg/multicluster/options.go @@ -12,7 +12,6 @@ import ( // EtcdPrefix must match the apiserver's configured etcd prefix (e.g. "/registry"). // DefaultCluster is used when no cluster can be extracted. // PathPrefix and ControlPlaneSegment define the URL form for path-based extraction. -// ClusterAnnotationKey is the server-owned annotation storing the cluster id. // ClusterFieldKey is a synthetic field name exposed via AttrFunc for selectors. // WatchStrategy selects shared kind-root or per-cluster watch behavior. // ClusterSource optionally provides cluster IDs for per-cluster watch. @@ -24,7 +23,6 @@ type Options struct { DefaultCluster string PathPrefix string ControlPlaneSegment string - ClusterAnnotationKey string ClusterFieldKey string WatchStrategy WatchStrategy ClusterSource ClusterSource @@ -33,6 +31,9 @@ type Options struct { OnClusterSelected func(clusterID string) // ServerName identifies the apiserver instance using this options set (for metrics/logging). ServerName string + // InformerRegistry provides MultiClusterInformer instances for resource types. + // Set by config.go and used by storage.go to register clusteredStorage instances. + InformerRegistry *InformerRegistry } // ResourceScope defines which keyspace view a request should use. 
@@ -54,7 +55,6 @@ const ( DefaultInternalCrossClusterUser = "system:apiserver" DefaultInternalCrossClusterCapability = "kplane.internal/cross-cluster-read" DefaultInternalCrossClusterUserAgent = "kplane-internal-cross-cluster" - DefaultClusterAnnotation = "multicluster.k8s.io/cluster" DefaultClusterField = "metadata.cluster" ) @@ -63,7 +63,6 @@ var DefaultOptions = Options{ DefaultCluster: DefaultClusterName, PathPrefix: DefaultPathPrefix, ControlPlaneSegment: DefaultControlPlaneSegment, - ClusterAnnotationKey: DefaultClusterAnnotation, ClusterFieldKey: DefaultClusterField, WatchStrategy: KindRootWatch{}, } diff --git a/pkg/multicluster/scopedinformer/shared.go b/pkg/multicluster/scopedinformer/shared.go deleted file mode 100644 index d23fb84..0000000 --- a/pkg/multicluster/scopedinformer/shared.go +++ /dev/null @@ -1,149 +0,0 @@ -package scopedinformer - -import ( - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - mc "github.com/kplane-dev/apiserver/pkg/multicluster" -) - -const ClusterIndexName = "mc.cluster" - -func NewAllClustersKubeClient(base *rest.Config) (kubernetes.Interface, error) { - if base == nil { - return nil, fmt.Errorf("base loopback config is required") - } - cfg := rest.CopyConfig(base) - cfg.Impersonate.UserName = mc.DefaultInternalCrossClusterUser - cfg.Impersonate.Groups = []string{"system:authenticated", "system:masters"} - if cfg.Impersonate.Extra == nil { - cfg.Impersonate.Extra = map[string][]string{} - } - cfg.Impersonate.Extra[mc.DefaultInternalCrossClusterCapability] = []string{"true"} - if cfg.UserAgent == "" { - cfg.UserAgent = mc.DefaultInternalCrossClusterUserAgent - } else { - cfg.UserAgent = mc.DefaultInternalCrossClusterUserAgent + " " + cfg.UserAgent - } - return kubernetes.NewForConfig(cfg) -} - -func EnsureClusterIndex(inf cache.SharedIndexInformer, clusterLabelKey string) error { - return 
inf.AddIndexers(cache.Indexers{ - ClusterIndexName: func(obj interface{}) ([]string, error) { - cid := ObjectCluster(obj, clusterLabelKey) - if cid == "" { - return nil, nil - } - return []string{cid}, nil - }, - }) -} - -func ObjectCluster(obj interface{}, clusterLabelKey string) string { - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = tombstone.Obj - } - acc, err := meta.Accessor(obj) - if err != nil { - return "" - } - return acc.GetLabels()[clusterLabelKey] -} - -func FilteredByCluster(indexer cache.Indexer, clusterID string) []interface{} { - items, err := indexer.ByIndex(ClusterIndexName, clusterID) - if err != nil { - return nil - } - return items -} - -func NewFilteredSharedIndexInformer(shared cache.SharedIndexInformer, clusterID, clusterLabelKey string) cache.SharedIndexInformer { - return &filteredSharedIndexInformer{shared: shared, clusterID: clusterID, clusterLabelKey: clusterLabelKey} -} - -type filteredSharedIndexInformer struct { - shared cache.SharedIndexInformer - clusterID string - clusterLabelKey string -} - -func (f *filteredSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { - return f.shared.AddEventHandler(f.wrap(handler)) -} - -func (f *filteredSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) (cache.ResourceEventHandlerRegistration, error) { - return f.shared.AddEventHandlerWithResyncPeriod(f.wrap(handler), resyncPeriod) -} - -func (f *filteredSharedIndexInformer) AddEventHandlerWithOptions(handler cache.ResourceEventHandler, options cache.HandlerOptions) (cache.ResourceEventHandlerRegistration, error) { - return f.shared.AddEventHandlerWithOptions(f.wrap(handler), options) -} - -func (f *filteredSharedIndexInformer) RemoveEventHandler(handle cache.ResourceEventHandlerRegistration) error { - return f.shared.RemoveEventHandler(handle) -} - -func (f *filteredSharedIndexInformer) 
GetStore() cache.Store { return f.shared.GetStore() } -func (f *filteredSharedIndexInformer) GetController() cache.Controller { - return f.shared.GetController() -} -func (f *filteredSharedIndexInformer) Run(stopCh <-chan struct{}) {} -func (f *filteredSharedIndexInformer) RunWithContext(ctx context.Context) {} -func (f *filteredSharedIndexInformer) HasSynced() bool { return f.shared.HasSynced() } -func (f *filteredSharedIndexInformer) LastSyncResourceVersion() string { - return f.shared.LastSyncResourceVersion() -} -func (f *filteredSharedIndexInformer) SetWatchErrorHandler(handler cache.WatchErrorHandler) error { - return f.shared.SetWatchErrorHandler(handler) -} -func (f *filteredSharedIndexInformer) SetWatchErrorHandlerWithContext(handler cache.WatchErrorHandlerWithContext) error { - return f.shared.SetWatchErrorHandlerWithContext(handler) -} -func (f *filteredSharedIndexInformer) SetTransform(handler cache.TransformFunc) error { - return f.shared.SetTransform(handler) -} -func (f *filteredSharedIndexInformer) IsStopped() bool { return f.shared.IsStopped() } -func (f *filteredSharedIndexInformer) AddIndexers(indexers cache.Indexers) error { - return f.shared.AddIndexers(indexers) -} -func (f *filteredSharedIndexInformer) GetIndexer() cache.Indexer { return f.shared.GetIndexer() } - -func (f *filteredSharedIndexInformer) wrap(handler cache.ResourceEventHandler) cache.ResourceEventHandler { - if handler == nil { - return cache.ResourceEventHandlerFuncs{} - } - return cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - if ObjectCluster(obj, f.clusterLabelKey) == f.clusterID { - handler.OnAdd(obj, false) - } - }, - UpdateFunc: func(oldObj, newObj interface{}) { - oldMatch := ObjectCluster(oldObj, f.clusterLabelKey) == f.clusterID - newMatch := ObjectCluster(newObj, f.clusterLabelKey) == f.clusterID - switch { - case oldMatch && newMatch: - handler.OnUpdate(oldObj, newObj) - case !oldMatch && newMatch: - handler.OnAdd(newObj, false) - case oldMatch 
&& !newMatch: - handler.OnDelete(oldObj) - } - }, - DeleteFunc: func(obj interface{}) { - if ObjectCluster(obj, f.clusterLabelKey) == f.clusterID { - handler.OnDelete(obj) - } - }, - } -} - -var _ cache.SharedIndexInformer = (*filteredSharedIndexInformer)(nil) diff --git a/pkg/multicluster/storage.go b/pkg/multicluster/storage.go index a14a887..f78328d 100644 --- a/pkg/multicluster/storage.go +++ b/pkg/multicluster/storage.go @@ -12,7 +12,6 @@ import ( "sync" "sync/atomic" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" @@ -27,6 +26,8 @@ import ( "k8s.io/component-base/metrics/legacyregistry" "github.com/kplane-dev/apiserver/pkg/multicluster/internalcap" + mcstorage "github.com/kplane-dev/apiserver/pkg/multicluster/storage" + extstorage "github.com/kplane-dev/storage" ) // RESTOptionsDecorator wraps the underlying getter to inject a decorator that @@ -65,10 +66,15 @@ func (w RESTOptionsDecorator) GetRESTOptions(resource schema.GroupResource, exam if err != nil { return opts, err } - base := opts.Decorator - if base == nil { - base = generic.UndecoratedStorage - } + // Use StorageWithClusterIdentity as the base decorator instead of the + // default StorageWithCacher. This configures the cacher pipeline with + // identity hooks (IdentityFromKey, WrapWatchObject, UnwrapObject) and + // sets WrapDecodedObject on the etcd3 storage config so that decoded + // objects carry their storage key through the watch.Event boundary. 
+ base := extstorage.StorageWithClusterIdentity(extstorage.DecoratorConfig{ + KeyLayout: extstorage.DefaultKeyLayout(), + GroupResource: resource, + }) base = wrapBaseDecorator(base, w.Options) opts.Decorator = func( config *storagebackend.ConfigForResource, @@ -158,6 +164,9 @@ func newClusteredStorage( indexers: indexers, options: options, } + if options.InformerRegistry != nil { + options.InformerRegistry.RegisterStorage(config.GroupResource, cs) + } return cs, cs.destroy, nil } @@ -173,7 +182,6 @@ func (c *clusteredStorage) Create(ctx context.Context, key string, obj, out runt if err := c.rejectAllClustersMutation(ctx); err != nil { return err } - c.enforceObjectClusterLabel(obj, c.clusterFromContext(ctx)) store, key, err := c.storeAndKey(ctx, key) if err != nil { return err @@ -197,15 +205,29 @@ func (c *clusteredStorage) Watch(ctx context.Context, key string, opts storage.L if err != nil { return nil, err } - return store.Watch(ctx, key, opts) + w, err := store.Watch(ctx, key, opts) + if err != nil { + return nil, err + } + return mcstorage.NewEntryWatch(w), nil } func (c *clusteredStorage) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error { - store, key, err := c.storeAndKey(ctx, key) + store, rewrittenKey, err := c.storeAndKey(ctx, key) if err != nil { return err } - return store.Get(ctx, key, opts, objPtr) + if entry, ok := objPtr.(*mcstorage.InternalEntry); ok && entry != nil && entry.Object != nil { + if err := store.Get(ctx, rewrittenKey, opts, entry.Object); err != nil { + return err + } + entry.StorageKey = rewrittenKey + if cid := extstorage.DefaultKeyLayout().ClusterFromKey(rewrittenKey); cid != "" { + entry.ClusterID = cid + } + return nil + } + return store.Get(ctx, rewrittenKey, opts, objPtr) } func (c *clusteredStorage) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error { @@ -220,20 +242,11 @@ func (c *clusteredStorage) GuaranteedUpdate(ctx context.Context, key 
string, des if err := c.rejectAllClustersMutation(ctx); err != nil { return err } - store, key, err := c.storeAndKey(ctx, key) + store, rewrittenKey, err := c.storeAndKey(ctx, key) if err != nil { return err } - cid := c.clusterFromContext(ctx) - wrappedUpdate := func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { - outObj, ttl, err := tryUpdate(input, res) - if err != nil || outObj == nil { - return outObj, ttl, err - } - c.enforceObjectClusterLabel(outObj, cid) - return outObj, ttl, nil - } - return store.GuaranteedUpdate(ctx, key, destination, ignoreNotFound, precond, wrappedUpdate, cachedExistingObject) + return store.GuaranteedUpdate(ctx, rewrittenKey, destination, ignoreNotFound, precond, tryUpdate, cachedExistingObject) } func (c *clusteredStorage) Stats(ctx context.Context) (storage.Stats, error) { @@ -268,12 +281,12 @@ func (c *clusteredStorage) GetCurrentResourceVersion(ctx context.Context) (uint6 return store.GetCurrentResourceVersion(ctx) } -func (c *clusteredStorage) SetKeysFunc(keysFunc storage.KeysFunc) { +func (c *clusteredStorage) EnableResourceSizeEstimation(keysFunc storage.KeysFunc) error { store, err := c.ensureStore() if err != nil { - return + return err } - store.SetKeysFunc(keysFunc) + return store.EnableResourceSizeEstimation(keysFunc) } func (c *clusteredStorage) CompactRevision() int64 { @@ -343,52 +356,25 @@ func (c *clusteredStorage) defaultCluster() string { return DefaultClusterName } -func (c *clusteredStorage) clusterFromContext(ctx context.Context) string { - cid, _, _ := FromContextScope(ctx) - if cid == "" { - cid = c.defaultCluster() - } - return cid -} - -func (c *clusteredStorage) enforceObjectClusterLabel(obj runtime.Object, cid string) { - if obj == nil { - return - } - acc, err := meta.Accessor(obj) - if err != nil { - return - } - key := c.options.ClusterAnnotationKey - if key == "" { - key = DefaultClusterAnnotation - } - lbls := acc.GetLabels() - if lbls == nil { - lbls = 
map[string]string{} - } - lbls[key] = cid - acc.SetLabels(lbls) -} func (c *clusteredStorage) clusterFromObject(obj runtime.Object) string { - if obj == nil { - return c.defaultCluster() - } - acc, err := meta.Accessor(obj) - if err != nil { - return c.defaultCluster() - } - key := c.options.ClusterAnnotationKey - if key == "" { - key = DefaultClusterAnnotation - } - if cid := acc.GetLabels()[key]; cid != "" { - return cid + if entry, ok := obj.(*mcstorage.InternalEntry); ok && entry != nil { + if entry.ClusterID != "" { + return entry.ClusterID + } + if entry.StorageKey != "" { + if cid := extstorage.DefaultKeyLayout().ClusterFromKey(entry.StorageKey); cid != "" { + return cid + } + } + if entry.Object != nil { + obj = entry.Object + } } return c.defaultCluster() } + func (c *clusteredStorage) rewriteKey(cluster, key string) string { if cluster == "" { cluster = DefaultClusterName @@ -430,6 +416,14 @@ func (c *clusteredStorage) ensureStore() (storage.Interface, error) { seq, server, c.resourcePrefix, kindRootPrefix, cfg.Prefix, hex.EncodeToString(stackHash[:8]), ) keyFunc := func(obj runtime.Object) (string, error) { + if entry, ok := obj.(*mcstorage.InternalEntry); ok && entry != nil { + if entry.StorageKey != "" { + return entry.StorageKey, nil + } + if entry.Object != nil { + obj = entry.Object + } + } key, err := c.keyFunc(obj) if err != nil { return "", err @@ -444,5 +438,5 @@ func (c *clusteredStorage) ensureStore() (storage.Interface, error) { } c.store = store c.destroyFn = destroy - return store, nil + return c.store, nil } diff --git a/pkg/multicluster/storage/entry.go b/pkg/multicluster/storage/entry.go new file mode 100644 index 0000000..893e7d1 --- /dev/null +++ b/pkg/multicluster/storage/entry.go @@ -0,0 +1,13 @@ +package storage + +import ( + extstorage "github.com/kplane-dev/storage" +) + +// InternalEntry is a type alias for the canonical ObjectWithClusterIdentity +// from the kplane-dev/storage module. 
It carries cluster identity metadata +// for storage/cache paths and is never serialized to API clients. +// +// The alias preserves backward compatibility: existing type assertions like +// *mcstorage.InternalEntry continue to work since Go aliases are identical types. +type InternalEntry = extstorage.ObjectWithClusterIdentity diff --git a/pkg/multicluster/storage/identity_integration_test.go b/pkg/multicluster/storage/identity_integration_test.go new file mode 100644 index 0000000..55f9ba2 --- /dev/null +++ b/pkg/multicluster/storage/identity_integration_test.go @@ -0,0 +1,161 @@ +package storage + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/watch" + + extstorage "github.com/kplane-dev/storage" +) + +// TestInternalEntry_IsObjectWithClusterIdentity confirms the type alias works: +// InternalEntry and ObjectWithClusterIdentity are the same type. +func TestInternalEntry_IsObjectWithClusterIdentity(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + }, + } + + // Create as external ObjectWithClusterIdentity + ext := &extstorage.ObjectWithClusterIdentity{ + Object: pod, + ClusterID: "c1", + } + + // Assign to InternalEntry (alias — same type, zero-cost) + var ie *InternalEntry = ext + + if ie.ClusterID != "c1" { + t.Errorf("ClusterID mismatch: got %q, want c1", ie.ClusterID) + } + if ie.Object != pod { + t.Error("Object mismatch after alias assignment") + } +} + +// TestObjectWithClusterIdentity_ImplementsMetaAccessor verifies that the +// external type (used via alias) properly delegates metav1.Object methods. 
+func TestObjectWithClusterIdentity_ImplementsMetaAccessor(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + ResourceVersion: "42", + Labels: map[string]string{"app": "web"}, + }, + } + + entry := &InternalEntry{ + Object: pod, + ClusterID: "c1", + } + + // meta.Accessor should work on the entry because identity_meta.go + // delegates to the inner object + acc, err := meta.Accessor(entry) + if err != nil { + t.Fatalf("meta.Accessor failed: %v", err) + } + if acc.GetName() != "nginx" { + t.Errorf("GetName() = %q, want nginx", acc.GetName()) + } + if acc.GetNamespace() != "default" { + t.Errorf("GetNamespace() = %q, want default", acc.GetNamespace()) + } + if acc.GetResourceVersion() != "42" { + t.Errorf("GetResourceVersion() = %q, want 42", acc.GetResourceVersion()) + } + if acc.GetLabels()["app"] != "web" { + t.Errorf("GetLabels() missing app=web") + } +} + +// TestEntryWatch_UnwrapsObjectWithClusterIdentity verifies that EntryWatch +// properly unwraps the ObjectWithClusterIdentity envelope (via alias). 
+func TestEntryWatch_UnwrapsObjectWithClusterIdentity(t *testing.T) { + fakeWatcher := watch.NewFake() + + ew := NewEntryWatch(fakeWatcher) + defer ew.Stop() + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + }, + } + + // Send an event with the ObjectWithClusterIdentity envelope + go func() { + fakeWatcher.Add(&InternalEntry{ + Object: pod, + ClusterID: "c1", + }) + }() + + event := <-ew.ResultChan() + if event.Type != watch.Added { + t.Fatalf("expected Added, got %v", event.Type) + } + + // The event object should be the inner pod, not the envelope + unwrapped, ok := event.Object.(*corev1.Pod) + if !ok { + t.Fatalf("expected *corev1.Pod, got %T", event.Object) + } + if unwrapped.Name != "nginx" { + t.Errorf("name = %q, want nginx", unwrapped.Name) + } +} + +// TestNoLabelsOnUnwrappedObjects confirms that the ObjectWithClusterIdentity +// envelope does not inject any labels or annotations onto the inner object. +func TestNoLabelsOnUnwrappedObjects(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + }, + } + + entry := &InternalEntry{ + Object: pod, + ClusterID: "c1", + StorageKey: "/pods/clusters/c1/default/nginx", + } + + // Verify no labels or annotations were added to the inner object + if len(pod.Labels) != 0 { + t.Errorf("inner object has labels: %v", pod.Labels) + } + if len(pod.Annotations) != 0 { + t.Errorf("inner object has annotations: %v", pod.Annotations) + } + + // Access through entry — still no labels/annotations + acc, err := meta.Accessor(entry) + if err != nil { + t.Fatalf("meta.Accessor: %v", err) + } + if len(acc.GetLabels()) != 0 { + t.Errorf("envelope accessor returned labels: %v", acc.GetLabels()) + } + if len(acc.GetAnnotations()) != 0 { + t.Errorf("envelope accessor returned annotations: %v", acc.GetAnnotations()) + } + + // Deep copy — still clean + copied := entry.DeepCopyObject().(*InternalEntry) + innerPod := 
copied.Object.(*corev1.Pod) + if len(innerPod.Labels) != 0 { + t.Errorf("deep-copied inner object has labels: %v", innerPod.Labels) + } + if len(innerPod.Annotations) != 0 { + t.Errorf("deep-copied inner object has annotations: %v", innerPod.Annotations) + } +} diff --git a/pkg/multicluster/storage/keyaware_watch.go b/pkg/multicluster/storage/keyaware_watch.go new file mode 100644 index 0000000..0c473c9 --- /dev/null +++ b/pkg/multicluster/storage/keyaware_watch.go @@ -0,0 +1,50 @@ +package storage + +import ( + "k8s.io/apimachinery/pkg/watch" +) + +// EntryWatch adapts a watch stream that carries InternalEntry objects into +// a stream that emits only runtime objects to API callers. +type EntryWatch struct { + source watch.Interface + resultCh chan watch.Event +} + +func NewEntryWatch(source watch.Interface) watch.Interface { + if source == nil { + return nil + } + w := &EntryWatch{ + source: source, + resultCh: make(chan watch.Event), + } + go w.run() + return w +} + +func (w *EntryWatch) Stop() { + if w.source != nil { + w.source.Stop() + } +} + +func (w *EntryWatch) ResultChan() <-chan watch.Event { + return w.resultCh +} + +func (w *EntryWatch) run() { + defer close(w.resultCh) + for evt := range w.source.ResultChan() { + entry, ok := evt.Object.(*InternalEntry) + if !ok || entry == nil { + w.resultCh <- evt + continue + } + if entry.Object == nil { + continue + } + w.resultCh <- watch.Event{Type: evt.Type, Object: entry.Object} + } +} + diff --git a/pkg/multicluster/storage/keyaware_watch_test.go b/pkg/multicluster/storage/keyaware_watch_test.go new file mode 100644 index 0000000..9406e92 --- /dev/null +++ b/pkg/multicluster/storage/keyaware_watch_test.go @@ -0,0 +1,34 @@ +package storage + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +func TestNewEntryWatch_EmitsRuntimeObjects(t *testing.T) { + fw := watch.NewFake() + defer fw.Stop() + + out := NewEntryWatch(fw) + 
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm"}} + fw.Add(&InternalEntry{Object: cm, ClusterID: "c1", StorageKey: "/registry/configmaps/clusters/c1/default/cm"}) + + evt, ok := <-out.ResultChan() + if !ok { + t.Fatalf("expected watch event") + } + if evt.Type != watch.Added { + t.Fatalf("event type=%s want=%s", evt.Type, watch.Added) + } + got, ok := evt.Object.(*corev1.ConfigMap) + if !ok { + t.Fatalf("expected ConfigMap object, got %T", evt.Object) + } + if got.Name != "cm" { + t.Fatalf("name=%q want=cm", got.Name) + } +} + diff --git a/pkg/multicluster/storage_test.go b/pkg/multicluster/storage_test.go index fc5398e..d3a1ded 100644 --- a/pkg/multicluster/storage_test.go +++ b/pkg/multicluster/storage_test.go @@ -44,7 +44,7 @@ func (f *fakeStorage) RequestWatchProgress(context.Context) error { return nil func (f *fakeStorage) GetCurrentResourceVersion(context.Context) (uint64, error) { return 0, nil } -func (f *fakeStorage) SetKeysFunc(storage.KeysFunc) {} +func (f *fakeStorage) EnableResourceSizeEstimation(storage.KeysFunc) error { return nil } func (f *fakeStorage) CompactRevision() int64 { return 0 } type recordingStorage struct { @@ -272,8 +272,8 @@ func TestCreateEnforcesClusterLabel(t *testing.T) { if !ok || got == nil { t.Fatalf("expected recorded configmap create object") } - if got.Labels[DefaultClusterAnnotation] != "c1" { - t.Fatalf("expected cluster label c1, got %q", got.Labels[DefaultClusterAnnotation]) + if got.Name != "cm" { + t.Fatalf("expected created object name 'cm', got %q", got.Name) } } @@ -328,7 +328,7 @@ func TestGuaranteedUpdateEnforcesClusterLabel(t *testing.T) { if !ok || got == nil { t.Fatalf("expected recorded updated configmap") } - if got.Labels[DefaultClusterAnnotation] != "c2" { - t.Fatalf("expected cluster label c2, got %q", got.Labels[DefaultClusterAnnotation]) + if got.Name != "cm" { + t.Fatalf("expected updated object name 'cm', got %q", got.Name) } } diff --git a/pkg/multicluster/typedinformer/adapters.go 
b/pkg/multicluster/typedinformer/adapters.go new file mode 100644 index 0000000..12629cf --- /dev/null +++ b/pkg/multicluster/typedinformer/adapters.go @@ -0,0 +1,482 @@ +package typedinformer + +import ( + "fmt" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" + corelisters "k8s.io/client-go/listers/core/v1" + discoverylisters "k8s.io/client-go/listers/discovery/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/api/legacyscheme" + + "github.com/kplane-dev/informer" +) + +// convert attempts a direct type assertion, falling back to scheme conversion +// for internal→versioned type conversion since the cacher stores internal types. +func convert[T any, PT interface { + *T + runtime.Object +}](obj runtime.Object) (PT, error) { + if typed, ok := obj.(PT); ok { + return typed, nil + } + out := PT(new(T)) + if err := legacyscheme.Scheme.Convert(obj, out, nil); err != nil { + return nil, fmt.Errorf("cannot convert %T: %w", obj, err) + } + return out, nil +} + +// --- Namespace --- + +type namespaceLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewNamespaceLister(mci *informer.MultiClusterInformer, cluster string) corelisters.NamespaceLister { + return &namespaceLister{mci: mci, cluster: cluster} +} + +func (l *namespaceLister) List(sel labels.Selector) (ret []*corev1.Namespace, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + ns, err := convert[corev1.Namespace](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(ns.Labels)) { + ret = append(ret, ns) + } + } + return ret, nil +} + +func (l *namespaceLister) Get(name string) (*corev1.Namespace, error) { + obj, ok := l.mci.Get(l.cluster, "", name) + if !ok { + return 
nil, fmt.Errorf("namespace %q not found", name) + } + return convert[corev1.Namespace](obj) +} + +type namespaceInformerAdapter struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewNamespaceInformer(mci *informer.MultiClusterInformer, cluster string) NamespaceInformer { + return &namespaceInformerAdapter{mci: mci, cluster: cluster} +} + +type NamespaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() corelisters.NamespaceLister +} + +func (a *namespaceInformerAdapter) Informer() cache.SharedIndexInformer { + return a.mci.ForCluster(a.cluster) +} + +func (a *namespaceInformerAdapter) Lister() corelisters.NamespaceLister { + return NewNamespaceLister(a.mci, a.cluster) +} + +// --- Secret --- + +type secretLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewSecretLister(mci *informer.MultiClusterInformer, cluster string) corelisters.SecretLister { + return &secretLister{mci: mci, cluster: cluster} +} + +func (l *secretLister) List(sel labels.Selector) (ret []*corev1.Secret, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + s, err := convert[corev1.Secret](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(s.Labels)) { + ret = append(ret, s) + } + } + return ret, nil +} + +func (l *secretLister) Secrets(ns string) corelisters.SecretNamespaceLister { + return &secretNamespaceLister{parent: l, namespace: ns} +} + +type secretNamespaceLister struct { + parent *secretLister + namespace string +} + +func (l *secretNamespaceLister) List(sel labels.Selector) (ret []*corev1.Secret, err error) { + all, _ := l.parent.List(sel) + for _, obj := range all { + if obj.Namespace == l.namespace { + ret = append(ret, obj) + } + } + return ret, nil +} + +func (l *secretNamespaceLister) Get(name string) (*corev1.Secret, error) { + obj, ok := l.parent.mci.Get(l.parent.cluster, l.namespace, name) + if !ok { + return nil, fmt.Errorf("secret 
%s/%s not found", l.namespace, name) + } + return convert[corev1.Secret](obj) +} + +// --- ServiceAccount --- + +type serviceAccountLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewServiceAccountLister(mci *informer.MultiClusterInformer, cluster string) corelisters.ServiceAccountLister { + return &serviceAccountLister{mci: mci, cluster: cluster} +} + +func (l *serviceAccountLister) List(sel labels.Selector) (ret []*corev1.ServiceAccount, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + sa, err := convert[corev1.ServiceAccount](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(sa.Labels)) { + ret = append(ret, sa) + } + } + return ret, nil +} + +func (l *serviceAccountLister) ServiceAccounts(ns string) corelisters.ServiceAccountNamespaceLister { + return &serviceAccountNamespaceLister{parent: l, namespace: ns} +} + +type serviceAccountNamespaceLister struct { + parent *serviceAccountLister + namespace string +} + +func (l *serviceAccountNamespaceLister) List(sel labels.Selector) (ret []*corev1.ServiceAccount, err error) { + all, _ := l.parent.List(sel) + for _, obj := range all { + if obj.Namespace == l.namespace { + ret = append(ret, obj) + } + } + return ret, nil +} + +func (l *serviceAccountNamespaceLister) Get(name string) (*corev1.ServiceAccount, error) { + obj, ok := l.parent.mci.Get(l.parent.cluster, l.namespace, name) + if !ok { + return nil, fmt.Errorf("serviceaccount %s/%s not found", l.namespace, name) + } + return convert[corev1.ServiceAccount](obj) +} + +// --- Pod --- + +type podLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewPodLister(mci *informer.MultiClusterInformer, cluster string) corelisters.PodLister { + return &podLister{mci: mci, cluster: cluster} +} + +func (l *podLister) List(sel labels.Selector) (ret []*corev1.Pod, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := 
range l.mci.List(l.cluster) { + p, err := convert[corev1.Pod](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(p.Labels)) { + ret = append(ret, p) + } + } + return ret, nil +} + +func (l *podLister) Pods(ns string) corelisters.PodNamespaceLister { + return &podNamespaceLister{parent: l, namespace: ns} +} + +type podNamespaceLister struct { + parent *podLister + namespace string +} + +func (l *podNamespaceLister) List(sel labels.Selector) (ret []*corev1.Pod, err error) { + all, _ := l.parent.List(sel) + for _, obj := range all { + if obj.Namespace == l.namespace { + ret = append(ret, obj) + } + } + return ret, nil +} + +func (l *podNamespaceLister) Get(name string) (*corev1.Pod, error) { + obj, ok := l.parent.mci.Get(l.parent.cluster, l.namespace, name) + if !ok { + return nil, fmt.Errorf("pod %s/%s not found", l.namespace, name) + } + return convert[corev1.Pod](obj) +} + +// --- Node --- + +type nodeLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewNodeLister(mci *informer.MultiClusterInformer, cluster string) corelisters.NodeLister { + return &nodeLister{mci: mci, cluster: cluster} +} + +func (l *nodeLister) List(sel labels.Selector) (ret []*corev1.Node, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + n, err := convert[corev1.Node](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(n.Labels)) { + ret = append(ret, n) + } + } + return ret, nil +} + +func (l *nodeLister) Get(name string) (*corev1.Node, error) { + obj, ok := l.mci.Get(l.cluster, "", name) + if !ok { + return nil, fmt.Errorf("node %q not found", name) + } + return convert[corev1.Node](obj) +} + +// --- Service --- + +type serviceLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewServiceLister(mci *informer.MultiClusterInformer, cluster string) corelisters.ServiceLister { + return &serviceLister{mci: mci, cluster: cluster} +} + +func (l 
*serviceLister) List(sel labels.Selector) (ret []*corev1.Service, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + svc, err := convert[corev1.Service](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(svc.Labels)) { + ret = append(ret, svc) + } + } + return ret, nil +} + +func (l *serviceLister) Services(ns string) corelisters.ServiceNamespaceLister { + return &serviceNamespaceLister{parent: l, namespace: ns} +} + +type serviceNamespaceLister struct { + parent *serviceLister + namespace string +} + +func (l *serviceNamespaceLister) List(sel labels.Selector) (ret []*corev1.Service, err error) { + all, _ := l.parent.List(sel) + for _, obj := range all { + if obj.Namespace == l.namespace { + ret = append(ret, obj) + } + } + return ret, nil +} + +func (l *serviceNamespaceLister) Get(name string) (*corev1.Service, error) { + obj, ok := l.parent.mci.Get(l.parent.cluster, l.namespace, name) + if !ok { + return nil, fmt.Errorf("service %s/%s not found", l.namespace, name) + } + return convert[corev1.Service](obj) +} + +// --- EndpointSlice --- + +type endpointSliceLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewEndpointSliceLister(mci *informer.MultiClusterInformer, cluster string) discoverylisters.EndpointSliceLister { + return &endpointSliceLister{mci: mci, cluster: cluster} +} + +func (l *endpointSliceLister) List(sel labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + ep, err := convert[discoveryv1.EndpointSlice](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(ep.Labels)) { + ret = append(ret, ep) + } + } + return ret, nil +} + +func (l *endpointSliceLister) EndpointSlices(ns string) discoverylisters.EndpointSliceNamespaceLister { + return &endpointSliceNamespaceLister{parent: l, namespace: ns} +} + +type 
endpointSliceNamespaceLister struct { + parent *endpointSliceLister + namespace string +} + +func (l *endpointSliceNamespaceLister) List(sel labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) { + all, _ := l.parent.List(sel) + for _, obj := range all { + if obj.Namespace == l.namespace { + ret = append(ret, obj) + } + } + return ret, nil +} + +func (l *endpointSliceNamespaceLister) Get(name string) (*discoveryv1.EndpointSlice, error) { + obj, ok := l.parent.mci.Get(l.parent.cluster, l.namespace, name) + if !ok { + return nil, fmt.Errorf("endpointslice %s/%s not found", l.namespace, name) + } + return convert[discoveryv1.EndpointSlice](obj) +} + +// --- MutatingWebhookConfiguration --- + +type mutatingWebhookConfigurationLister struct { + mci *informer.MultiClusterInformer + cluster string +} + +func NewMutatingWebhookConfigurationLister(mci *informer.MultiClusterInformer, cluster string) admissionregistrationlisters.MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{mci: mci, cluster: cluster} +} + +func (l *mutatingWebhookConfigurationLister) List(sel labels.Selector) (ret []*admissionregistrationv1.MutatingWebhookConfiguration, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + mwc, err := convert[admissionregistrationv1.MutatingWebhookConfiguration](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(mwc.Labels)) { + ret = append(ret, mwc) + } + } + return ret, nil +} + +func (l *mutatingWebhookConfigurationLister) Get(name string) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { + obj, ok := l.mci.Get(l.cluster, "", name) + if !ok { + return nil, fmt.Errorf("mutatingwebhookconfiguration %q not found", name) + } + return convert[admissionregistrationv1.MutatingWebhookConfiguration](obj) +} + +// --- ValidatingWebhookConfiguration --- + +type validatingWebhookConfigurationLister struct { + mci *informer.MultiClusterInformer + 
cluster string +} + +func NewValidatingWebhookConfigurationLister(mci *informer.MultiClusterInformer, cluster string) admissionregistrationlisters.ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{mci: mci, cluster: cluster} +} + +func (l *validatingWebhookConfigurationLister) List(sel labels.Selector) (ret []*admissionregistrationv1.ValidatingWebhookConfiguration, err error) { + if sel == nil { + sel = labels.Everything() + } + for _, obj := range l.mci.List(l.cluster) { + vwc, err := convert[admissionregistrationv1.ValidatingWebhookConfiguration](obj) + if err != nil { + continue + } + if sel.Matches(labels.Set(vwc.Labels)) { + ret = append(ret, vwc) + } + } + return ret, nil +} + +func (l *validatingWebhookConfigurationLister) Get(name string) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) { + obj, ok := l.mci.Get(l.cluster, "", name) + if !ok { + return nil, fmt.Errorf("validatingwebhookconfiguration %q not found", name) + } + return convert[admissionregistrationv1.ValidatingWebhookConfiguration](obj) +} + +// --- Lister interface assertions --- + +var ( + _ corelisters.NamespaceLister = (*namespaceLister)(nil) + _ corelisters.SecretLister = (*secretLister)(nil) + _ corelisters.ServiceAccountLister = (*serviceAccountLister)(nil) + _ corelisters.PodLister = (*podLister)(nil) + _ corelisters.NodeLister = (*nodeLister)(nil) + _ corelisters.ServiceLister = (*serviceLister)(nil) + + _ discoverylisters.EndpointSliceLister = (*endpointSliceLister)(nil) + + _ admissionregistrationlisters.MutatingWebhookConfigurationLister = (*mutatingWebhookConfigurationLister)(nil) + _ admissionregistrationlisters.ValidatingWebhookConfigurationLister = (*validatingWebhookConfigurationLister)(nil) +) diff --git a/pkg/multicluster/typedinformer/factory.go b/pkg/multicluster/typedinformer/factory.go new file mode 100644 index 0000000..4848756 --- /dev/null +++ b/pkg/multicluster/typedinformer/factory.go @@ -0,0 +1,294 @@ +package 
typedinformer + +import ( + "context" + "reflect" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + admissionregistrationinformers "k8s.io/client-go/informers/admissionregistration" + admissionregistrationinformersv1 "k8s.io/client-go/informers/admissionregistration/v1" + admissionregistrationinformersv1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" + admissionregistrationinformersv1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" + apiserverinternalinformers "k8s.io/client-go/informers/apiserverinternal" + appsinformers "k8s.io/client-go/informers/apps" + autoscalinginformers "k8s.io/client-go/informers/autoscaling" + batchinformers "k8s.io/client-go/informers/batch" + certificatesinformers "k8s.io/client-go/informers/certificates" + coordinationinformers "k8s.io/client-go/informers/coordination" + coreinformers "k8s.io/client-go/informers/core" + coreinformersv1 "k8s.io/client-go/informers/core/v1" + discoveryinformers "k8s.io/client-go/informers/discovery" + discoveryinformersv1 "k8s.io/client-go/informers/discovery/v1" + discoveryinformersv1beta1 "k8s.io/client-go/informers/discovery/v1beta1" + eventsinformers "k8s.io/client-go/informers/events" + extensionsinformers "k8s.io/client-go/informers/extensions" + flowcontrolinformers "k8s.io/client-go/informers/flowcontrol" + internalinformers "k8s.io/client-go/informers/internalinterfaces" + networkinginformers "k8s.io/client-go/informers/networking" + nodeinformers "k8s.io/client-go/informers/node" + policyinformers "k8s.io/client-go/informers/policy" + rbacinformers "k8s.io/client-go/informers/rbac" + resourceinformers "k8s.io/client-go/informers/resource" + schedulinginformers "k8s.io/client-go/informers/scheduling" + storageinformers "k8s.io/client-go/informers/storage" + 
storagemigrationinformers "k8s.io/client-go/informers/storagemigration" + admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1" + corelisters "k8s.io/client-go/listers/core/v1" + discoverylisters "k8s.io/client-go/listers/discovery/v1" + "k8s.io/client-go/tools/cache" + + mcinformer "github.com/kplane-dev/informer" +) + +// MCIFactoryConfig configures the SharedInformerFactory adapter backed by MCIs. +// Only populate the MCIs for resource types your consumers need. +type MCIFactoryConfig struct { + ClusterID string + Namespaces *mcinformer.MultiClusterInformer + Services *mcinformer.MultiClusterInformer + EndpointSlices *mcinformer.MultiClusterInformer + MutatingWebhooks *mcinformer.MultiClusterInformer + ValidatingWebhooks *mcinformer.MultiClusterInformer +} + +// NewMCIFactory creates a SharedInformerFactory backed by MultiClusterInformers. +// MCIs are already running; Start() and WaitForCacheSync() delegate to MCI HasSynced. +func NewMCIFactory(cfg MCIFactoryConfig) informers.SharedInformerFactory { + return &mciFactory{cfg: cfg} +} + +type mciFactory struct { + cfg MCIFactoryConfig +} + +func (f *mciFactory) Start(_ <-chan struct{}) {} +func (f *mciFactory) Shutdown() {} + +func (f *mciFactory) StartWithContext(_ context.Context) {} + +func (f *mciFactory) InformerName() *cache.InformerName { + return nil +} + +func (f *mciFactory) WaitForCacheSync(_ <-chan struct{}) map[reflect.Type]bool { + out := map[reflect.Type]bool{} + if f.cfg.Namespaces != nil { + out[reflect.TypeOf(&corev1.Namespace{})] = f.cfg.Namespaces.HasSynced() + } + if f.cfg.Services != nil { + out[reflect.TypeOf(&corev1.Service{})] = f.cfg.Services.HasSynced() + } + if f.cfg.EndpointSlices != nil { + out[reflect.TypeOf(&discoveryv1.EndpointSlice{})] = f.cfg.EndpointSlices.HasSynced() + } + if f.cfg.MutatingWebhooks != nil { + out[reflect.TypeOf(&admissionregistrationv1.MutatingWebhookConfiguration{})] = f.cfg.MutatingWebhooks.HasSynced() + } + if 
f.cfg.ValidatingWebhooks != nil { + out[reflect.TypeOf(&admissionregistrationv1.ValidatingWebhookConfiguration{})] = f.cfg.ValidatingWebhooks.HasSynced() + } + return out +} + +func (f *mciFactory) WaitForCacheSyncWithContext(_ context.Context) cache.SyncResult { + return cache.SyncResult{} +} + +func (f *mciFactory) ForResource(_ schema.GroupVersionResource) (informers.GenericInformer, error) { + return nil, nil +} + +func (f *mciFactory) InformerFor(_ runtime.Object, _ internalinformers.NewInformerFunc) cache.SharedIndexInformer { + return nil +} + +// Group accessors — only Core, Discovery, and Admissionregistration are populated. + +func (f *mciFactory) Core() coreinformers.Interface { + return &mciCoreGroup{f: f} +} + +func (f *mciFactory) Discovery() discoveryinformers.Interface { + return &mciDiscoveryGroup{f: f} +} + +func (f *mciFactory) Admissionregistration() admissionregistrationinformers.Interface { + return &mciAdmissionregistrationGroup{f: f} +} + +// Unsupported groups — return nil; will panic if called by consumer code. 
+func (f *mciFactory) Internal() apiserverinternalinformers.Interface { return nil } +func (f *mciFactory) Apps() appsinformers.Interface { return nil } +func (f *mciFactory) Autoscaling() autoscalinginformers.Interface { return nil } +func (f *mciFactory) Batch() batchinformers.Interface { return nil } +func (f *mciFactory) Certificates() certificatesinformers.Interface { return nil } +func (f *mciFactory) Coordination() coordinationinformers.Interface { return nil } +func (f *mciFactory) Events() eventsinformers.Interface { return nil } +func (f *mciFactory) Extensions() extensionsinformers.Interface { return nil } +func (f *mciFactory) Flowcontrol() flowcontrolinformers.Interface { return nil } +func (f *mciFactory) Networking() networkinginformers.Interface { return nil } +func (f *mciFactory) Node() nodeinformers.Interface { return nil } +func (f *mciFactory) Policy() policyinformers.Interface { return nil } +func (f *mciFactory) Rbac() rbacinformers.Interface { return nil } +func (f *mciFactory) Resource() resourceinformers.Interface { return nil } +func (f *mciFactory) Scheduling() schedulinginformers.Interface { return nil } +func (f *mciFactory) Storage() storageinformers.Interface { return nil } +func (f *mciFactory) Storagemigration() storagemigrationinformers.Interface { return nil } + +// --- Core group --- + +type mciCoreGroup struct{ f *mciFactory } + +func (g *mciCoreGroup) V1() coreinformersv1.Interface { + return &mciCoreV1{f: g.f} +} + +type mciCoreV1 struct{ f *mciFactory } + +func (v *mciCoreV1) Namespaces() coreinformersv1.NamespaceInformer { + if v.f.cfg.Namespaces == nil { + return nil + } + return &mciTypedInformer[corelisters.NamespaceLister]{ + mci: v.f.cfg.Namespaces, + clusterID: v.f.cfg.ClusterID, + newLister: NewNamespaceLister, + } +} + +func (v *mciCoreV1) Services() coreinformersv1.ServiceInformer { + if v.f.cfg.Services == nil { + return nil + } + return &mciTypedInformer[corelisters.ServiceLister]{ + mci: v.f.cfg.Services, + 
clusterID: v.f.cfg.ClusterID, + newLister: NewServiceLister, + } +} + +// Unsupported Core V1 informers — return nil (not called by webhook/namespace plugins). +func (v *mciCoreV1) ComponentStatuses() coreinformersv1.ComponentStatusInformer { return nil } +func (v *mciCoreV1) ConfigMaps() coreinformersv1.ConfigMapInformer { return nil } +func (v *mciCoreV1) Endpoints() coreinformersv1.EndpointsInformer { return nil } +func (v *mciCoreV1) Events() coreinformersv1.EventInformer { return nil } +func (v *mciCoreV1) LimitRanges() coreinformersv1.LimitRangeInformer { return nil } +func (v *mciCoreV1) Nodes() coreinformersv1.NodeInformer { return nil } +func (v *mciCoreV1) PersistentVolumes() coreinformersv1.PersistentVolumeInformer { + return nil +} +func (v *mciCoreV1) PersistentVolumeClaims() coreinformersv1.PersistentVolumeClaimInformer { + return nil +} +func (v *mciCoreV1) Pods() coreinformersv1.PodInformer { return nil } +func (v *mciCoreV1) PodTemplates() coreinformersv1.PodTemplateInformer { return nil } +func (v *mciCoreV1) ReplicationControllers() coreinformersv1.ReplicationControllerInformer { return nil } +func (v *mciCoreV1) ResourceQuotas() coreinformersv1.ResourceQuotaInformer { return nil } +func (v *mciCoreV1) Secrets() coreinformersv1.SecretInformer { return nil } +func (v *mciCoreV1) ServiceAccounts() coreinformersv1.ServiceAccountInformer { return nil } + +// --- Discovery group --- + +type mciDiscoveryGroup struct{ f *mciFactory } + +func (g *mciDiscoveryGroup) V1() discoveryinformersv1.Interface { + return &mciDiscoveryV1{f: g.f} +} + +func (g *mciDiscoveryGroup) V1beta1() discoveryinformersv1beta1.Interface { + return nil +} + +type mciDiscoveryV1 struct{ f *mciFactory } + +func (v *mciDiscoveryV1) EndpointSlices() discoveryinformersv1.EndpointSliceInformer { + if v.f.cfg.EndpointSlices == nil { + return nil + } + return &mciTypedInformer[discoverylisters.EndpointSliceLister]{ + mci: v.f.cfg.EndpointSlices, + clusterID: v.f.cfg.ClusterID, + 
newLister: NewEndpointSliceLister, + } +} + +// --- Admissionregistration group --- + +type mciAdmissionregistrationGroup struct{ f *mciFactory } + +func (g *mciAdmissionregistrationGroup) V1() admissionregistrationinformersv1.Interface { + return &mciAdmissionregistrationV1{f: g.f} +} + +func (g *mciAdmissionregistrationGroup) V1alpha1() admissionregistrationinformersv1alpha1.Interface { + return nil +} + +func (g *mciAdmissionregistrationGroup) V1beta1() admissionregistrationinformersv1beta1.Interface { + return nil +} + +type mciAdmissionregistrationV1 struct{ f *mciFactory } + +func (v *mciAdmissionregistrationV1) MutatingWebhookConfigurations() admissionregistrationinformersv1.MutatingWebhookConfigurationInformer { + if v.f.cfg.MutatingWebhooks == nil { + return nil + } + return &mciTypedInformer[admissionregistrationlisters.MutatingWebhookConfigurationLister]{ + mci: v.f.cfg.MutatingWebhooks, + clusterID: v.f.cfg.ClusterID, + newLister: NewMutatingWebhookConfigurationLister, + } +} + +func (v *mciAdmissionregistrationV1) ValidatingWebhookConfigurations() admissionregistrationinformersv1.ValidatingWebhookConfigurationInformer { + if v.f.cfg.ValidatingWebhooks == nil { + return nil + } + return &mciTypedInformer[admissionregistrationlisters.ValidatingWebhookConfigurationLister]{ + mci: v.f.cfg.ValidatingWebhooks, + clusterID: v.f.cfg.ClusterID, + newLister: NewValidatingWebhookConfigurationLister, + } +} + +func (v *mciAdmissionregistrationV1) ValidatingAdmissionPolicies() admissionregistrationinformersv1.ValidatingAdmissionPolicyInformer { + return nil +} + +func (v *mciAdmissionregistrationV1) ValidatingAdmissionPolicyBindings() admissionregistrationinformersv1.ValidatingAdmissionPolicyBindingInformer { + return nil +} + +func (v *mciAdmissionregistrationV1) MutatingAdmissionPolicies() admissionregistrationinformersv1.MutatingAdmissionPolicyInformer { + return nil +} + +func (v *mciAdmissionregistrationV1) MutatingAdmissionPolicyBindings() 
admissionregistrationinformersv1.MutatingAdmissionPolicyBindingInformer { + return nil +} + +// --- Generic typed informer adapter --- + +// mciTypedInformer satisfies typed informer interfaces (e.g., coreinformersv1.NamespaceInformer) +// by delegating Informer() to MCI.ForCluster() and Lister() to a typed lister constructor. +type mciTypedInformer[L any] struct { + mci *mcinformer.MultiClusterInformer + clusterID string + newLister func(*mcinformer.MultiClusterInformer, string) L +} + +func (a *mciTypedInformer[L]) Informer() cache.SharedIndexInformer { + return a.mci.ForCluster(a.clusterID) +} + +func (a *mciTypedInformer[L]) Lister() L { + return a.newLister(a.mci, a.clusterID) +} + +var _ informers.SharedInformerFactory = (*mciFactory)(nil) diff --git a/test/smoke/apiserver_test.go b/test/smoke/apiserver_test.go index 39aef50..972e04e 100644 --- a/test/smoke/apiserver_test.go +++ b/test/smoke/apiserver_test.go @@ -114,6 +114,7 @@ func mustWriteTokenFile(t *testing.T, path string) string { type apiserverOptions struct { rootCluster string etcdPrefix string + port int extraArgs []string } @@ -128,7 +129,10 @@ func startAPIServerWithOptions(t *testing.T, etcdEndpoints string, opts apiserve } bin := buildAPIServerBinary(t) - port := mustFreePort(t) + port := opts.port + if port == 0 { + port = mustFreePort(t) + } tmp := t.TempDir() ctx, cancel := context.WithCancel(context.Background()) diff --git a/test/smoke/auth_restart_test.go b/test/smoke/auth_restart_test.go new file mode 100644 index 0000000..81df342 --- /dev/null +++ b/test/smoke/auth_restart_test.go @@ -0,0 +1,121 @@ +package smoke + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + authorizationv1 "k8s.io/api/authorization/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestRBACIsolationAfterAPIServerRestart(t *testing.T) { + etcd := os.Getenv("ETCD_ENDPOINTS") + if etcd == "" { + 
t.Skip("ETCD_ENDPOINTS is not set; skipping integration smoke tests") + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + port := mustFreePort(t) + prefix := fmt.Sprintf("/registry-smoke-restart-rbac-%d", time.Now().UnixNano()) + opts := apiserverOptions{etcdPrefix: prefix, port: port} + + s1 := startAPIServerWithOptions(t, etcd, opts) + clusterA := "c-" + randSuffix(3) + subjectUser := "user-" + randSuffix(4) + roleName := "role-" + randSuffix(4) + bindingName := "rb-" + randSuffix(4) + + csA1 := kubeClientForCluster(t, s1, clusterA) + csRoot1 := kubeClientForCluster(t, s1, s1.root) + + _, err := csA1.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: roleName}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get"}, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("cluster=%s create clusterrole: %v", clusterA, err) + } + _, err = csA1.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: bindingName}, + Subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: subjectUser}, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("cluster=%s create clusterrolebinding: %v", clusterA, err) + } + + sar := &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + User: subjectUser, + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Namespace: "default", + Verb: "get", + Group: "", + Resource: "configmaps", + }, + }, + } + waitForSubjectAccessReviewWithLogs(ctx, t, csA1, sar, true, s1.logs) + waitForSubjectAccessReviewWithLogs(ctx, t, csRoot1, sar, false, s1.logs) + + // Simulate a full process restart while keeping persisted etcd state. 
+ s1.Stop() + + s2 := startAPIServerWithOptions(t, etcd, opts) + csA2 := kubeClientForCluster(t, s2, clusterA) + csRoot2 := kubeClientForCluster(t, s2, s2.root) + + waitForRBACObject := func(name string, getFn func(context.Context, string, metav1.GetOptions) (interface{}, error)) { + deadline := time.Now().Add(20 * time.Second) + var lastErr error + for time.Now().Before(deadline) { + if _, err := getFn(ctx, name, metav1.GetOptions{}); err == nil { + return + } else { + lastErr = err + } + time.Sleep(300 * time.Millisecond) + } + t.Fatalf("cluster=%s object %q not visible after restart: %v", clusterA, name, lastErr) + } + waitForRBACObject(roleName, func(ctx context.Context, name string, opts metav1.GetOptions) (interface{}, error) { + return csA2.RbacV1().ClusterRoles().Get(ctx, name, opts) + }) + waitForRBACObject(bindingName, func(ctx context.Context, name string, opts metav1.GetOptions) (interface{}, error) { + return csA2.RbacV1().ClusterRoleBindings().Get(ctx, name, opts) + }) + if _, err := csRoot2.RbacV1().ClusterRoles().Get(ctx, roleName, metav1.GetOptions{}); err == nil { + t.Fatalf("unexpectedly found clusterrole %q in root cluster after restart", roleName) + } else if !apierrors.IsNotFound(err) { + t.Fatalf("root clusterrole get unexpected error: %v", err) + } + if _, err := csRoot2.RbacV1().ClusterRoleBindings().Get(ctx, bindingName, metav1.GetOptions{}); err == nil { + t.Fatalf("unexpectedly found clusterrolebinding %q in root cluster after restart", bindingName) + } else if !apierrors.IsNotFound(err) { + t.Fatalf("root clusterrolebinding get unexpected error: %v", err) + } + + // Shared RBAC projection must rebuild from relist/watch after restart. 
+ waitForSubjectAccessReviewWithLogs(ctx, t, csA2, sar, true, s2.logs) + waitForSubjectAccessReviewWithLogs(ctx, t, csRoot2, sar, false, s2.logs) +} + diff --git a/test/smoke/auth_test.go b/test/smoke/auth_test.go index f832abb..e8efa64 100644 --- a/test/smoke/auth_test.go +++ b/test/smoke/auth_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - mc "github.com/kplane-dev/apiserver/pkg/multicluster" authenticationv1 "k8s.io/api/authentication/v1" authorizationv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" @@ -45,12 +44,10 @@ func TestRBACIsolationAcrossClusters(t *testing.T) { }, }, metav1.CreateOptions{}) if err != nil { - t.Fatalf("cluster=%s create clusterrole: %v", clusterA, err) + t.Fatalf("cluster=%s create clusterrole: %v\nlogs:\n%s", clusterA, err, s.logs()) } - if got, getErr := csA.RbacV1().ClusterRoles().Get(ctx, roleName, metav1.GetOptions{}); getErr != nil { + if _, getErr := csA.RbacV1().ClusterRoles().Get(ctx, roleName, metav1.GetOptions{}); getErr != nil { t.Fatalf("cluster=%s get clusterrole: %v", clusterA, getErr) - } else if got.Labels[mc.DefaultClusterAnnotation] != clusterA { - t.Fatalf("cluster=%s clusterrole missing label %q=%q labels=%v", clusterA, mc.DefaultClusterAnnotation, clusterA, got.Labels) } t.Cleanup(func() { _ = csA.RbacV1().ClusterRoles().Delete(context.Background(), roleName, metav1.DeleteOptions{}) @@ -70,10 +67,8 @@ func TestRBACIsolationAcrossClusters(t *testing.T) { if err != nil { t.Fatalf("cluster=%s create clusterrolebinding: %v", clusterA, err) } - if got, getErr := csA.RbacV1().ClusterRoleBindings().Get(ctx, bindingName, metav1.GetOptions{}); getErr != nil { + if _, getErr := csA.RbacV1().ClusterRoleBindings().Get(ctx, bindingName, metav1.GetOptions{}); getErr != nil { t.Fatalf("cluster=%s get clusterrolebinding: %v", clusterA, getErr) - } else if got.Labels[mc.DefaultClusterAnnotation] != clusterA { - t.Fatalf("cluster=%s clusterrolebinding missing label %q=%q labels=%v", clusterA, 
mc.DefaultClusterAnnotation, clusterA, got.Labels) } t.Cleanup(func() { _ = csA.RbacV1().ClusterRoleBindings().Delete(context.Background(), bindingName, metav1.DeleteOptions{}) @@ -141,7 +136,7 @@ func TestServiceAccountTokenIsolationAcrossClusters(t *testing.T) { waitForTokenReview(ctx, t, csA, tokenRoot, false) } -func TestRBACCreateSetsClusterLabel(t *testing.T) { +func TestRBACCreateClusterRole(t *testing.T) { etcd := os.Getenv("ETCD_ENDPOINTS") s := startAPIServer(t, etcd) @@ -165,8 +160,8 @@ func TestRBACCreateSetsClusterLabel(t *testing.T) { if err != nil { t.Fatalf("cluster=%s create clusterrole: %v", clusterA, err) } - if obj.Labels[mc.DefaultClusterAnnotation] != clusterA { - t.Fatalf("expected cluster label %q=%q, got labels=%v", mc.DefaultClusterAnnotation, clusterA, obj.Labels) + if obj == nil { + t.Fatalf("expected created clusterrole object") } } diff --git a/test/smoke/memory_200vcp_test.go b/test/smoke/memory_200vcp_test.go index 02f3ba7..cf4df4d 100644 --- a/test/smoke/memory_200vcp_test.go +++ b/test/smoke/memory_200vcp_test.go @@ -137,7 +137,7 @@ func exerciseClusterForMemory(ctx context.Context, t *testing.T, s *testAPIServe plural := "memwidgets" crdName := plural + "." 
+ group createTestCRD(ctx, t, crdClient, crdName, group, plural) - waitForCRDEstablished(ctx, t, crdClient, clusterID, crdName) + waitForCRDEstablished(ctx, t, crdClient, clusterID, crdName) waitForResourcePresence(t, cs, clusterID, group+"/v1", plural, true) return nil } diff --git a/test/smoke/no_etcd_metadata_test.go b/test/smoke/no_etcd_metadata_test.go new file mode 100644 index 0000000..53e7e88 --- /dev/null +++ b/test/smoke/no_etcd_metadata_test.go @@ -0,0 +1,159 @@ +package smoke + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + "testing" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TestNoIdentityMetadataInEtcd creates objects through the API, then reads the +// raw etcd values to verify that cluster identity is NOT stored as labels or +// annotations on the object. Identity is derived entirely from the etcd key path. +func TestNoIdentityMetadataInEtcd(t *testing.T) { + etcdEndpoints := os.Getenv("ETCD_ENDPOINTS") + if etcdEndpoints == "" { + t.Skip("ETCD_ENDPOINTS not set") + } + + prefix := fmt.Sprintf("/registry-etcd-meta-%d", time.Now().UnixNano()) + s := startAPIServerWithOptions(t, etcdEndpoints, apiserverOptions{ + etcdPrefix: prefix, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + clusters := []string{s.root, "c-" + randSuffix(3)} + cmNames := map[string]string{} + + for _, cid := range clusters { + cs := kubeClientForCluster(t, s, cid) + if err := waitForNamespace(ctx, cs, "default"); err != nil { + t.Fatalf("cluster=%s wait namespace: %v", cid, err) + } + + cmName := "etcd-check-" + randSuffix(4) + cmNames[cid] = cmName + + _, err := cs.CoreV1().ConfigMaps("default").Create(ctx, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Labels: map[string]string{"user-label": "yes"}, + }, + Data: map[string]string{"hello": "world"}, + }, metav1.CreateOptions{}) + if err 
!= nil { + t.Fatalf("cluster=%s create: %v", cid, err) + } + } + + // Now read raw etcd to inspect stored objects + etcdClient, err := clientv3.New(clientv3.Config{ + Endpoints: strings.Split(etcdEndpoints, ","), + DialTimeout: 5 * time.Second, + }) + if err != nil { + t.Fatalf("etcd client: %v", err) + } + defer etcdClient.Close() + + // List all keys under the prefix to find our configmaps + resp, err := etcdClient.Get(ctx, prefix, clientv3.WithPrefix()) + if err != nil { + t.Fatalf("etcd get: %v", err) + } + + for _, cid := range clusters { + cmName := cmNames[cid] + found := false + + for _, kv := range resp.Kvs { + key := string(kv.Key) + if !strings.Contains(key, cmName) { + continue + } + found = true + + t.Logf("cluster=%s key=%s (len=%d)", cid, key, len(kv.Value)) + + // The value is protobuf-encoded, but labels/annotations would still + // appear as readable strings. Check raw bytes for identity keywords. + raw := string(kv.Value) + identityKeywords := []string{ + "clusterid", "cluster-id", "cluster_id", + "kplane", "storagekey", "storage-key", + "multicluster", "identity", + } + for _, keyword := range identityKeywords { + if strings.Contains(strings.ToLower(raw), keyword) { + t.Errorf("cluster=%s cm=%s: raw etcd value contains identity keyword %q", cid, cmName, keyword) + } + } + + // Also try to find the JSON metadata section and parse it. + // Protobuf values have the JSON-like metadata embedded. + // Look for labels/annotations that shouldn't be there. 
+ if idx := strings.Index(raw, `"labels"`); idx >= 0 { + // Extract a chunk around the labels for inspection + end := idx + 200 + if end > len(raw) { + end = len(raw) + } + chunk := raw[idx:end] + // The only label should be "user-label" + if strings.Contains(chunk, "cluster") { + t.Errorf("cluster=%s cm=%s: etcd labels section contains 'cluster': ...%s...", cid, cmName, chunk) + } + } + + // Try JSON decode as a secondary check (works if stored as JSON) + var obj map[string]interface{} + if json.Unmarshal(kv.Value, &obj) == nil { + meta, _ := obj["metadata"].(map[string]interface{}) + if meta != nil { + labels, _ := meta["labels"].(map[string]interface{}) + annotations, _ := meta["annotations"].(map[string]interface{}) + for k := range labels { + if k != "user-label" && isIdentityMetadata(k) { + t.Errorf("cluster=%s cm=%s: etcd JSON has identity label %q", cid, cmName, k) + } + } + for k := range annotations { + if isIdentityMetadata(k) { + t.Errorf("cluster=%s cm=%s: etcd JSON has identity annotation %q", cid, cmName, k) + } + } + } + } + } + + if !found { + t.Errorf("cluster=%s: configmap %s not found in etcd under prefix %s", cid, cmName, prefix) + } + } + + // Verify the key path contains cluster identity (structural check) + for _, kv := range resp.Kvs { + key := string(kv.Key) + if !strings.Contains(key, "configmaps") { + continue + } + for _, cid := range clusters { + if strings.Contains(key, cmNames[cid]) { + if !strings.Contains(key, "/clusters/"+cid+"/") { + t.Errorf("expected key to contain /clusters/%s/, got: %s", cid, key) + } else { + t.Logf("confirmed: cluster=%s identity is in key path: %s", cid, key) + } + } + } + } +} diff --git a/test/smoke/no_metadata_leak_test.go b/test/smoke/no_metadata_leak_test.go new file mode 100644 index 0000000..a9bd310 --- /dev/null +++ b/test/smoke/no_metadata_leak_test.go @@ -0,0 +1,147 @@ +package smoke + +import ( + "context" + "os" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +// TestNoMetadataLeakOnExternalObjects verifies that cluster identity metadata +// (labels, annotations) never appears on objects returned through the API. +// The ObjectWithClusterIdentity envelope must be fully stripped before objects +// reach the client via Create response, Get, List, and Watch. +func TestNoMetadataLeakOnExternalObjects(t *testing.T) { + etcd := os.Getenv("ETCD_ENDPOINTS") + s := startAPIServer(t, etcd) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + clusters := []string{s.root, "c-" + randSuffix(3), "c-" + randSuffix(3)} + + for _, cid := range clusters { + cs := kubeClientForCluster(t, s, cid) + ns := "default" + if err := waitForNamespace(ctx, cs, ns); err != nil { + t.Fatalf("cluster=%s wait namespace: %v", cid, err) + } + + cmName := "leak-test-" + randSuffix(4) + + // Start a watch before creating so we can catch the ADDED event. + watcher, err := cs.CoreV1().ConfigMaps(ns).Watch(ctx, metav1.ListOptions{}) + if err != nil { + t.Fatalf("cluster=%s watch: %v", cid, err) + } + + // --- Create response --- + created, err := cs.CoreV1().ConfigMaps(ns).Create(ctx, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Labels: map[string]string{"user-label": "yes"}, + }, + Data: map[string]string{"k": "v"}, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("cluster=%s create: %v", cid, err) + } + assertNoIdentityMetadata(t, cid, "create-response", created) + + // --- Get --- + got, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, cmName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("cluster=%s get: %v", cid, err) + } + assertNoIdentityMetadata(t, cid, "get", got) + + // --- List --- + list, err := cs.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{}) + if err != nil { + t.Fatalf("cluster=%s list: %v", cid, err) + } + for i := range list.Items { + if list.Items[i].Name == cmName { + 
assertNoIdentityMetadata(t, cid, "list", &list.Items[i]) + } + } + + // --- Watch --- + deadline := time.After(10 * time.Second) + watchLoop: + for { + select { + case evt := <-watcher.ResultChan(): + if evt.Type == watch.Bookmark || evt.Object == nil { + continue + } + cm, ok := evt.Object.(*corev1.ConfigMap) + if !ok { + continue + } + if cm.Name == cmName { + assertNoIdentityMetadata(t, cid, "watch", cm) + break watchLoop + } + case <-deadline: + t.Fatalf("cluster=%s timed out waiting for watch event for %s", cid, cmName) + } + } + watcher.Stop() + + // --- Update response --- + got.Data["k"] = "v2" + updated, err := cs.CoreV1().ConfigMaps(ns).Update(ctx, got, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("cluster=%s update: %v", cid, err) + } + assertNoIdentityMetadata(t, cid, "update-response", updated) + + // Cleanup + _ = cs.CoreV1().ConfigMaps(ns).Delete(ctx, cmName, metav1.DeleteOptions{}) + } +} + +// assertNoIdentityMetadata checks that a ConfigMap returned by the API has no +// cluster-identity-related labels or annotations. The only label allowed is +// the one explicitly set by the test ("user-label"). 
+func assertNoIdentityMetadata(t *testing.T, cluster, path string, cm *corev1.ConfigMap) { + t.Helper() + + // Check labels — only "user-label" should be present + for k := range cm.Labels { + if k == "user-label" { + continue + } + if isIdentityMetadata(k) { + t.Errorf("cluster=%s path=%s: unexpected identity label %q=%q on %s", cluster, path, k, cm.Labels[k], cm.Name) + } + } + + // Check annotations — none expected from identity system + for k := range cm.Annotations { + if isIdentityMetadata(k) { + t.Errorf("cluster=%s path=%s: unexpected identity annotation %q=%q on %s", cluster, path, k, cm.Annotations[k], cm.Name) + } + } +} + +func isIdentityMetadata(key string) bool { + suspects := []string{ + "cluster", "clusterid", "cluster-id", "cluster_id", + "kplane", "multicluster", "storage-key", "storagekey", + "identity", "vcp", + } + lower := strings.ToLower(key) + for _, s := range suspects { + if strings.Contains(lower, s) { + return true + } + } + return false +}