From 50c73ead54a7f4a14c2c79943c4b76235aa70a20 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Thu, 26 Feb 2026 19:30:20 -0600 Subject: [PATCH 1/7] feat(notes): add multi-cluster support for notes webhook subject lookup The notes webhook was failing to find subject resources that exist in project control planes because it only queried the local cluster. This change injects the multicluster manager into the notes webhooks, enabling them to search for subjects across all engaged clusters. Changes: - Add ListEngagedClusters() method to Provider for cluster iteration - Create ClusterGetter interface and MultiClusterGetter implementation - Update Note and ClusterNote webhooks to search all clusters for subjects - Wire up multicluster manager in controller-manager for note webhooks - Add E2E tests for multi-cluster subject lookups using ConfigMaps/Namespaces The webhook now searches local cluster first, then all project control planes. Graceful fallback to local-only search when Clusters is nil maintains backwards compatibility for unit tests. 
Co-Authored-By: Claude Opus 4.5 --- Taskfile.yaml | 2 +- .../controller-manager/controllermanager.go | 25 ++-- .../notes/v1alpha1/clusternote_webhook.go | 52 ++++++-- .../v1alpha1/clusternote_webhook_test.go | 1 + .../webhooks/notes/v1alpha1/multicluster.go | 39 ++++++ .../webhooks/notes/v1alpha1/note_webhook.go | 52 ++++++-- .../notes/v1alpha1/note_webhook_test.go | 1 + pkg/multicluster-runtime/milo/provider.go | 13 ++ .../01-organization.yaml | 8 ++ .../chainsaw-test.yaml | 117 +++++++++++++++++ .../test-data/clusternote.yaml | 12 ++ .../test-data/namespace.yaml | 6 + .../test-data/project.yaml | 10 ++ .../01-organization.yaml | 8 ++ .../chainsaw-test.yaml | 118 ++++++++++++++++++ .../test-data/configmap.yaml | 9 ++ .../test-data/note.yaml | 14 +++ .../test-data/project.yaml | 10 ++ 18 files changed, 462 insertions(+), 35 deletions(-) create mode 100644 internal/webhooks/notes/v1alpha1/multicluster.go create mode 100644 test/notes/clusternote-multicluster-subject/01-organization.yaml create mode 100644 test/notes/clusternote-multicluster-subject/chainsaw-test.yaml create mode 100644 test/notes/clusternote-multicluster-subject/test-data/clusternote.yaml create mode 100644 test/notes/clusternote-multicluster-subject/test-data/namespace.yaml create mode 100644 test/notes/clusternote-multicluster-subject/test-data/project.yaml create mode 100644 test/notes/note-multicluster-subject/01-organization.yaml create mode 100644 test/notes/note-multicluster-subject/chainsaw-test.yaml create mode 100644 test/notes/note-multicluster-subject/test-data/configmap.yaml create mode 100644 test/notes/note-multicluster-subject/test-data/note.yaml create mode 100644 test/notes/note-multicluster-subject/test-data/project.yaml diff --git a/Taskfile.yaml b/Taskfile.yaml index be9302be..e7435ec7 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -274,7 +274,7 @@ tasks: fi echo "Test paths: $TEST_PATHS" - KUBECONFIG=.milo/kubeconfig "{{.TOOL_DIR}}/chainsaw" test $TEST_PATHS --selector 
"requires!=authorization-provider" + KUBECONFIG=.milo/kubeconfig "{{.TOOL_DIR}}/chainsaw" test $TEST_PATHS --selector "requires!=authorization-provider,requires!=multicluster" silent: true test:unit: diff --git a/cmd/milo/controller-manager/controllermanager.go b/cmd/milo/controller-manager/controllermanager.go index 87bf5e30..d5b52cf7 100644 --- a/cmd/milo/controller-manager/controllermanager.go +++ b/cmd/milo/controller-manager/controllermanager.go @@ -553,14 +553,9 @@ func Run(ctx context.Context, c *config.CompletedConfig, opts *Options) error { logger.Error(err, "Error setting up platform access rejection webhook") klog.FlushAndExit(klog.ExitFlushTimeout, 1) } - if err := notesv1alpha1webhook.SetupNoteWebhooksWithManager(ctrl); err != nil { - logger.Error(err, "Error setting up note webhook") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - if err := notesv1alpha1webhook.SetupClusterNoteWebhooksWithManager(ctrl); err != nil { - logger.Error(err, "Error setting up clusternote webhook") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } + // Note webhooks are set up later after the multicluster provider is created, + // so they can share it for cross-cluster subject lookups. + // See the noteClusterGetter setup after the quota multicluster manager. 
projectCtrl := resourcemanagercontroller.ProjectController{ ControlPlaneClient: ctrl.GetClient(), @@ -666,6 +661,20 @@ func Run(ctx context.Context, c *config.CompletedConfig, opts *Options) error { } logger.Info("Local cluster engaged successfully") + // Set up note webhooks using the shared multicluster provider for cross-cluster subject lookups + noteClusterGetter := &notesv1alpha1webhook.MultiClusterGetter{ + Manager: mcMgr, + Provider: provider, + } + if err := notesv1alpha1webhook.SetupNoteWebhooksWithManager(ctrl, noteClusterGetter); err != nil { + logger.Error(err, "Error setting up note webhook") + klog.FlushAndExit(klog.ExitFlushTimeout, 1) + } + if err := notesv1alpha1webhook.SetupClusterNoteWebhooksWithManager(ctrl, noteClusterGetter); err != nil { + logger.Error(err, "Error setting up clusternote webhook") + klog.FlushAndExit(klog.ExitFlushTimeout, 1) + } + // Start concurrently to resolve circular dependency between provider and manager go func() { logger.Info("Starting Datum cluster provider") diff --git a/internal/webhooks/notes/v1alpha1/clusternote_webhook.go b/internal/webhooks/notes/v1alpha1/clusternote_webhook.go index 5559a8d5..2097efd6 100644 --- a/internal/webhooks/notes/v1alpha1/clusternote_webhook.go +++ b/internal/webhooks/notes/v1alpha1/clusternote_webhook.go @@ -23,7 +23,7 @@ import ( var clusterNoteLog = logf.Log.WithName("clusternote-resource") -func SetupClusterNoteWebhooksWithManager(mgr ctrl.Manager) error { +func SetupClusterNoteWebhooksWithManager(mgr ctrl.Manager, clusters ClusterGetter) error { clusterNoteLog.Info("Setting up notes.miloapis.com clusternote webhooks") return ctrl.NewWebhookManagedBy(mgr). For(&notesv1alpha1.ClusterNote{}). @@ -31,6 +31,7 @@ func SetupClusterNoteWebhooksWithManager(mgr ctrl.Manager) error { Client: mgr.GetClient(), Scheme: mgr.GetScheme(), RESTMapper: mgr.GetRESTMapper(), + Clusters: clusters, }). 
WithValidator(&ClusterNoteValidator{ Client: mgr.GetClient(), @@ -44,6 +45,7 @@ type ClusterNoteMutator struct { Client client.Client Scheme *runtime.Scheme RESTMapper meta.RESTMapper + Clusters ClusterGetter // For multi-cluster lookups } var _ admission.CustomDefaulter = &ClusterNoteMutator{} @@ -97,24 +99,48 @@ func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clust return fmt.Errorf("failed to get REST mapping for %s: %w", groupKind, err) } - subject := &unstructured.Unstructured{} - subject.SetGroupVersionKind(mapping.GroupVersionKind) - - if err := m.Client.Get(ctx, types.NamespacedName{ + key := types.NamespacedName{ Name: clusterNote.Spec.SubjectRef.Name, - }, subject); err != nil { - if errors.IsNotFound(err) { - return fmt.Errorf("subject resource not found: %w", err) + } + + // If no multi-cluster support, fall back to local client only + if m.Clusters == nil { + subject := &unstructured.Unstructured{} + subject.SetGroupVersionKind(mapping.GroupVersionKind) + if err := m.Client.Get(ctx, key, subject); err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("subject resource not found: %w", err) + } + return fmt.Errorf("failed to get subject resource: %w", err) } - return fmt.Errorf("failed to get subject resource: %w", err) + return controllerutil.SetOwnerReference(subject, clusterNote, m.Scheme) } - // Set owner reference - if err := controllerutil.SetOwnerReference(subject, clusterNote, m.Scheme); err != nil { - return fmt.Errorf("failed to set owner reference: %w", err) + // Search all clusters (local first, then project control planes) + for _, clusterName := range m.Clusters.ListClusterNames() { + cluster, err := m.Clusters.GetCluster(ctx, clusterName) + if err != nil { + clusterNoteLog.V(1).Info("Failed to get cluster", "cluster", clusterName, "error", err) + continue + } + + subject := &unstructured.Unstructured{} + subject.SetGroupVersionKind(mapping.GroupVersionKind) + + if err := cluster.GetClient().Get(ctx, key, 
subject); err == nil { + if clusterName != "" { + clusterNoteLog.Info("Found subject in project control plane", + "cluster", clusterName, + "subject", key) + } + return controllerutil.SetOwnerReference(subject, clusterNote, m.Scheme) + } } - return nil + return fmt.Errorf("subject resource not found in any cluster: %s/%s %s", + clusterNote.Spec.SubjectRef.APIGroup, + clusterNote.Spec.SubjectRef.Kind, + clusterNote.Spec.SubjectRef.Name) } // +kubebuilder:webhook:path=/validate-notes-miloapis-com-v1alpha1-clusternote,mutating=false,failurePolicy=fail,sideEffects=None,groups=notes.miloapis.com,resources=clusternotes,verbs=create;update,versions=v1alpha1,name=vclusternote.notes.miloapis.com,admissionReviewVersions={v1,v1beta1},serviceName=milo-controller-manager,servicePort=9443,serviceNamespace=milo-system diff --git a/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go b/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go index e3a4b0af..e3a0b393 100644 --- a/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go +++ b/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go @@ -129,6 +129,7 @@ func TestClusterNoteMutator_Default(t *testing.T) { Client: fakeClient, Scheme: testScheme, RESTMapper: newTestRESTMapper(), + Clusters: nil, // nil means local-only search (backwards compatible) } // Create admission request context diff --git a/internal/webhooks/notes/v1alpha1/multicluster.go b/internal/webhooks/notes/v1alpha1/multicluster.go new file mode 100644 index 00000000..d68140b3 --- /dev/null +++ b/internal/webhooks/notes/v1alpha1/multicluster.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + "context" + + miloprovider "go.miloapis.com/milo/pkg/multicluster-runtime/milo" + "sigs.k8s.io/controller-runtime/pkg/cluster" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" +) + +// ClusterGetter provides access to clusters in the multicluster runtime. +type ClusterGetter interface { + // GetCluster returns a cluster by name. 
Empty string returns the local cluster. + GetCluster(ctx context.Context, name string) (cluster.Cluster, error) + // ListClusterNames returns names of all engaged clusters including "" for local. + ListClusterNames() []string +} + +// MultiClusterGetter implements ClusterGetter using mcmanager and provider. +type MultiClusterGetter struct { + Manager mcmanager.Manager + Provider *miloprovider.Provider +} + +// GetCluster returns a cluster by name. Empty string returns the local cluster. +func (g *MultiClusterGetter) GetCluster(ctx context.Context, name string) (cluster.Cluster, error) { + return g.Manager.GetCluster(ctx, name) +} + +// ListClusterNames returns names of all engaged clusters including "" for local. +func (g *MultiClusterGetter) ListClusterNames() []string { + // Start with local cluster (empty string) + names := []string{""} + // Add all project control planes + if g.Provider != nil { + names = append(names, g.Provider.ListEngagedClusters()...) + } + return names +} diff --git a/internal/webhooks/notes/v1alpha1/note_webhook.go b/internal/webhooks/notes/v1alpha1/note_webhook.go index 298294fd..1b523627 100644 --- a/internal/webhooks/notes/v1alpha1/note_webhook.go +++ b/internal/webhooks/notes/v1alpha1/note_webhook.go @@ -23,7 +23,7 @@ import ( var noteLog = logf.Log.WithName("note-resource") -func SetupNoteWebhooksWithManager(mgr ctrl.Manager) error { +func SetupNoteWebhooksWithManager(mgr ctrl.Manager, clusters ClusterGetter) error { noteLog.Info("Setting up notes.miloapis.com note webhooks") return ctrl.NewWebhookManagedBy(mgr). For(&notesv1alpha1.Note{}). @@ -31,6 +31,7 @@ func SetupNoteWebhooksWithManager(mgr ctrl.Manager) error { Client: mgr.GetClient(), Scheme: mgr.GetScheme(), RESTMapper: mgr.GetRESTMapper(), + Clusters: clusters, }). 
WithValidator(&NoteValidator{ Client: mgr.GetClient(), @@ -44,6 +45,7 @@ type NoteMutator struct { Client client.Client Scheme *runtime.Scheme RESTMapper meta.RESTMapper + Clusters ClusterGetter // For multi-cluster lookups } var _ admission.CustomDefaulter = &NoteMutator{} @@ -97,25 +99,49 @@ func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv return fmt.Errorf("failed to get REST mapping for %s: %w", groupKind, err) } - subject := &unstructured.Unstructured{} - subject.SetGroupVersionKind(mapping.GroupVersionKind) - - if err := m.Client.Get(ctx, types.NamespacedName{ + key := types.NamespacedName{ Name: note.Spec.SubjectRef.Name, Namespace: note.Spec.SubjectRef.Namespace, - }, subject); err != nil { - if errors.IsNotFound(err) { - return fmt.Errorf("subject resource not found: %w", err) + } + + // If no multi-cluster support, fall back to local client only + if m.Clusters == nil { + subject := &unstructured.Unstructured{} + subject.SetGroupVersionKind(mapping.GroupVersionKind) + if err := m.Client.Get(ctx, key, subject); err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("subject resource not found: %w", err) + } + return fmt.Errorf("failed to get subject resource: %w", err) } - return fmt.Errorf("failed to get subject resource: %w", err) + return controllerutil.SetOwnerReference(subject, note, m.Scheme) } - // Set owner reference - if err := controllerutil.SetOwnerReference(subject, note, m.Scheme); err != nil { - return fmt.Errorf("failed to set owner reference: %w", err) + // Search all clusters (local first, then project control planes) + for _, clusterName := range m.Clusters.ListClusterNames() { + cluster, err := m.Clusters.GetCluster(ctx, clusterName) + if err != nil { + noteLog.V(1).Info("Failed to get cluster", "cluster", clusterName, "error", err) + continue + } + + subject := &unstructured.Unstructured{} + subject.SetGroupVersionKind(mapping.GroupVersionKind) + + if err := cluster.GetClient().Get(ctx, key, 
subject); err == nil { + if clusterName != "" { + noteLog.Info("Found subject in project control plane", + "cluster", clusterName, + "subject", key) + } + return controllerutil.SetOwnerReference(subject, note, m.Scheme) + } } - return nil + return fmt.Errorf("subject resource not found in any cluster: %s/%s %s", + note.Spec.SubjectRef.APIGroup, + note.Spec.SubjectRef.Kind, + note.Spec.SubjectRef.Name) } // +kubebuilder:webhook:path=/validate-notes-miloapis-com-v1alpha1-note,mutating=false,failurePolicy=fail,sideEffects=None,groups=notes.miloapis.com,resources=notes,verbs=create;update,versions=v1alpha1,name=vnote.notes.miloapis.com,admissionReviewVersions={v1,v1beta1},serviceName=milo-controller-manager,servicePort=9443,serviceNamespace=milo-system diff --git a/internal/webhooks/notes/v1alpha1/note_webhook_test.go b/internal/webhooks/notes/v1alpha1/note_webhook_test.go index 684b010a..d6ffb9bb 100644 --- a/internal/webhooks/notes/v1alpha1/note_webhook_test.go +++ b/internal/webhooks/notes/v1alpha1/note_webhook_test.go @@ -163,6 +163,7 @@ func TestNoteMutator_Default(t *testing.T) { Client: fakeClient, Scheme: testScheme, RESTMapper: newTestRESTMapper(), + Clusters: nil, // nil means local-only search (backwards compatible) } // Create admission request context diff --git a/pkg/multicluster-runtime/milo/provider.go b/pkg/multicluster-runtime/milo/provider.go index 2055d331..bfe0e4c0 100644 --- a/pkg/multicluster-runtime/milo/provider.go +++ b/pkg/multicluster-runtime/milo/provider.go @@ -298,6 +298,19 @@ func (p *Provider) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result return ctrl.Result{}, nil } +// ListEngagedClusters returns the names of all currently engaged project clusters. +// Note: This does not include the local cluster (empty string key). 
+func (p *Provider) ListEngagedClusters() []string { + p.lock.Lock() + defer p.lock.Unlock() + + names := make([]string, 0, len(p.projects)) + for name := range p.projects { + names = append(names, name) + } + return names +} + func (p *Provider) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { p.lock.Lock() defer p.lock.Unlock() diff --git a/test/notes/clusternote-multicluster-subject/01-organization.yaml b/test/notes/clusternote-multicluster-subject/01-organization.yaml new file mode 100644 index 00000000..edfa66a7 --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/01-organization.yaml @@ -0,0 +1,8 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Organization +metadata: + name: cn-mc-test-org + labels: + test.miloapis.com/notes: "clusternote-multicluster" +spec: + type: Standard diff --git a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml new file mode 100644 index 00000000..3e0297f1 --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml @@ -0,0 +1,117 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: clusternote-multicluster-subject + labels: + requires: multicluster +spec: + description: | + Tests that ClusterNotes can reference cluster-scoped subjects (Namespaces) + in project control planes. 
+ + Validates: + - ClusterNote in project control plane can reference cluster-scoped Namespace + - Owner reference is correctly set on the ClusterNote + - ClusterNote is garbage collected when Namespace is deleted + + clusters: + main: + kubeconfig: kubeconfig-main + org: + kubeconfig: kubeconfig-org + project: + kubeconfig: kubeconfig-project-1 + + steps: + - name: setup-organization + description: Create test organization + cluster: main + try: + - apply: + file: 01-organization.yaml + - wait: + apiVersion: v1 + kind: Namespace + name: organization-cn-mc-test-org + timeout: 30s + for: + jsonPath: + path: '{.status.phase}' + value: Active + + - name: create-project + description: Create project in org control plane + cluster: org + try: + - apply: + file: test-data/project.yaml + - wait: + apiVersion: resourcemanager.miloapis.com/v1alpha1 + kind: Project + name: cn-mc-test-project-1 + timeout: 60s + for: + condition: + name: Ready + value: 'true' + + - name: create-namespace-in-project + description: Create cluster-scoped Namespace in project control plane + cluster: project + try: + - apply: + file: test-data/namespace.yaml + - wait: + apiVersion: v1 + kind: Namespace + name: cn-test-namespace + timeout: 30s + for: + jsonPath: + path: '{.status.phase}' + value: Active + + - name: create-clusternote-referencing-namespace + description: Create ClusterNote referencing the Namespace in project control plane + cluster: project + try: + - apply: + file: test-data/clusternote.yaml + - wait: + apiVersion: notes.miloapis.com/v1alpha1 + kind: ClusterNote + name: test-namespace-clusternote + timeout: 30s + for: + condition: + name: Ready + value: 'True' + + - name: verify-owner-reference + description: Verify owner reference points to Namespace + cluster: project + try: + - assert: + resource: + apiVersion: notes.miloapis.com/v1alpha1 + kind: ClusterNote + metadata: + name: test-namespace-clusternote + (length(metadata.ownerReferences) > `0`): true + + - name: 
delete-namespace-verify-clusternote-deletion + description: Delete Namespace and verify ClusterNote is garbage collected + cluster: project + try: + - delete: + ref: + apiVersion: v1 + kind: Namespace + name: cn-test-namespace + - wait: + apiVersion: notes.miloapis.com/v1alpha1 + kind: ClusterNote + name: test-namespace-clusternote + timeout: 60s + for: + deletion: {} diff --git a/test/notes/clusternote-multicluster-subject/test-data/clusternote.yaml b/test/notes/clusternote-multicluster-subject/test-data/clusternote.yaml new file mode 100644 index 00000000..0494dae7 --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/test-data/clusternote.yaml @@ -0,0 +1,12 @@ +apiVersion: notes.miloapis.com/v1alpha1 +kind: ClusterNote +metadata: + name: test-namespace-clusternote + labels: + test.miloapis.com/notes: "clusternote-multicluster" +spec: + content: "This ClusterNote references a Namespace in the project control plane" + subjectRef: + apiGroup: "" + kind: Namespace + name: cn-test-namespace diff --git a/test/notes/clusternote-multicluster-subject/test-data/namespace.yaml b/test/notes/clusternote-multicluster-subject/test-data/namespace.yaml new file mode 100644 index 00000000..f5a987d1 --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/test-data/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cn-test-namespace + labels: + test.miloapis.com/notes: "clusternote-multicluster" diff --git a/test/notes/clusternote-multicluster-subject/test-data/project.yaml b/test/notes/clusternote-multicluster-subject/test-data/project.yaml new file mode 100644 index 00000000..4d6eb5a5 --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/test-data/project.yaml @@ -0,0 +1,10 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Project +metadata: + name: cn-mc-test-project-1 + labels: + test.miloapis.com/notes: "clusternote-multicluster" +spec: + ownerRef: + kind: Organization + name: cn-mc-test-org diff --git 
a/test/notes/note-multicluster-subject/01-organization.yaml b/test/notes/note-multicluster-subject/01-organization.yaml new file mode 100644 index 00000000..78ec9cf7 --- /dev/null +++ b/test/notes/note-multicluster-subject/01-organization.yaml @@ -0,0 +1,8 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Organization +metadata: + name: note-mc-test-org + labels: + test.miloapis.com/notes: "multicluster" +spec: + type: Standard diff --git a/test/notes/note-multicluster-subject/chainsaw-test.yaml b/test/notes/note-multicluster-subject/chainsaw-test.yaml new file mode 100644 index 00000000..d0ce5b97 --- /dev/null +++ b/test/notes/note-multicluster-subject/chainsaw-test.yaml @@ -0,0 +1,118 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: note-multicluster-subject + labels: + requires: multicluster +spec: + description: | + Tests that Notes can reference subjects (ConfigMaps) in project control planes. + + Validates: + - Note created in project control plane can reference ConfigMap in same control plane + - Owner reference is correctly set on the Note + - Note is garbage collected when ConfigMap is deleted + + clusters: + main: + kubeconfig: kubeconfig-main + org: + kubeconfig: kubeconfig-org + project: + kubeconfig: kubeconfig-project-1 + + steps: + - name: setup-organization + description: Create test organization + cluster: main + try: + - apply: + file: 01-organization.yaml + - wait: + apiVersion: v1 + kind: Namespace + name: organization-note-mc-test-org + timeout: 30s + for: + jsonPath: + path: '{.status.phase}' + value: Active + + - name: create-project + description: Create project in org control plane + cluster: org + try: + - apply: + file: test-data/project.yaml + - wait: + apiVersion: resourcemanager.miloapis.com/v1alpha1 + kind: Project + name: note-mc-test-project-1 + timeout: 60s + for: + condition: + name: Ready + value: 'true' + + - name: create-configmap-in-project + description: Create ConfigMap resource in 
project control plane + cluster: project + try: + - apply: + file: test-data/configmap.yaml + - assert: + resource: + apiVersion: v1 + kind: ConfigMap + metadata: + name: test-subject-configmap + namespace: default + + - name: create-note-referencing-configmap + description: Create Note referencing the ConfigMap in project control plane + cluster: project + try: + - apply: + file: test-data/note.yaml + - wait: + apiVersion: notes.miloapis.com/v1alpha1 + kind: Note + name: test-configmap-note + namespace: default + timeout: 30s + for: + condition: + name: Ready + value: 'True' + + - name: verify-owner-reference + description: Verify owner reference points to ConfigMap + cluster: project + try: + - assert: + resource: + apiVersion: notes.miloapis.com/v1alpha1 + kind: Note + metadata: + name: test-configmap-note + namespace: default + (length(metadata.ownerReferences) > `0`): true + + - name: delete-configmap-verify-note-deletion + description: Delete ConfigMap and verify Note is garbage collected + cluster: project + try: + - delete: + ref: + apiVersion: v1 + kind: ConfigMap + name: test-subject-configmap + namespace: default + - wait: + apiVersion: notes.miloapis.com/v1alpha1 + kind: Note + name: test-configmap-note + namespace: default + timeout: 60s + for: + deletion: {} diff --git a/test/notes/note-multicluster-subject/test-data/configmap.yaml b/test/notes/note-multicluster-subject/test-data/configmap.yaml new file mode 100644 index 00000000..daf01ab0 --- /dev/null +++ b/test/notes/note-multicluster-subject/test-data/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-subject-configmap + namespace: default + labels: + test.miloapis.com/notes: "multicluster" +data: + key: value diff --git a/test/notes/note-multicluster-subject/test-data/note.yaml b/test/notes/note-multicluster-subject/test-data/note.yaml new file mode 100644 index 00000000..77c424ec --- /dev/null +++ b/test/notes/note-multicluster-subject/test-data/note.yaml @@ 
-0,0 +1,14 @@ +apiVersion: notes.miloapis.com/v1alpha1 +kind: Note +metadata: + name: test-configmap-note + namespace: default + labels: + test.miloapis.com/notes: "multicluster" +spec: + content: "This note references a ConfigMap in the project control plane" + subjectRef: + apiGroup: "" + kind: ConfigMap + name: test-subject-configmap + namespace: default diff --git a/test/notes/note-multicluster-subject/test-data/project.yaml b/test/notes/note-multicluster-subject/test-data/project.yaml new file mode 100644 index 00000000..d75cd25a --- /dev/null +++ b/test/notes/note-multicluster-subject/test-data/project.yaml @@ -0,0 +1,10 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Project +metadata: + name: note-mc-test-project-1 + labels: + test.miloapis.com/notes: "multicluster" +spec: + ownerRef: + kind: Organization + name: note-mc-test-org From b617a07233352a6a871c2ece618d7044349c3512 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Fri, 27 Feb 2026 13:16:14 -0600 Subject: [PATCH 2/7] fix(test): add kubeconfigs for notes multi-cluster E2E tests Add kubeconfig files that point to organization and project control plane API paths for the notes multi-cluster tests. This allows the tests to access control planes through Milo's aggregated API server paths rather than separate cluster contexts. Also removes the multicluster test exclusion from Taskfile since these tests should now work in CI. 
Co-Authored-By: Claude Opus 4.5 --- Taskfile.yaml | 2 +- .../chainsaw-test.yaml | 2 -- .../kubeconfig-main | 18 ++++++++++++++++++ .../kubeconfig-org | 18 ++++++++++++++++++ .../kubeconfig-project-1 | 18 ++++++++++++++++++ .../chainsaw-test.yaml | 2 -- .../note-multicluster-subject/kubeconfig-main | 18 ++++++++++++++++++ .../note-multicluster-subject/kubeconfig-org | 18 ++++++++++++++++++ .../kubeconfig-project-1 | 18 ++++++++++++++++++ 9 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 test/notes/clusternote-multicluster-subject/kubeconfig-main create mode 100644 test/notes/clusternote-multicluster-subject/kubeconfig-org create mode 100644 test/notes/clusternote-multicluster-subject/kubeconfig-project-1 create mode 100644 test/notes/note-multicluster-subject/kubeconfig-main create mode 100644 test/notes/note-multicluster-subject/kubeconfig-org create mode 100644 test/notes/note-multicluster-subject/kubeconfig-project-1 diff --git a/Taskfile.yaml b/Taskfile.yaml index e7435ec7..be9302be 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -274,7 +274,7 @@ tasks: fi echo "Test paths: $TEST_PATHS" - KUBECONFIG=.milo/kubeconfig "{{.TOOL_DIR}}/chainsaw" test $TEST_PATHS --selector "requires!=authorization-provider,requires!=multicluster" + KUBECONFIG=.milo/kubeconfig "{{.TOOL_DIR}}/chainsaw" test $TEST_PATHS --selector "requires!=authorization-provider" silent: true test:unit: diff --git a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml index 3e0297f1..fd95010c 100644 --- a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml +++ b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml @@ -2,8 +2,6 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: clusternote-multicluster-subject - labels: - requires: multicluster spec: description: | Tests that ClusterNotes can reference cluster-scoped subjects (Namespaces) diff --git 
a/test/notes/clusternote-multicluster-subject/kubeconfig-main b/test/notes/clusternote-multicluster-subject/kubeconfig-main new file mode 100644 index 00000000..b0496e6c --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/kubeconfig-main @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443 + name: milo-test-infra +contexts: +- context: + cluster: milo-test-infra + user: admin + name: milo-test-infra +current-context: milo-test-infra +kind: Config +preferences: {} +users: +- name: admin + user: + token: test-admin-token diff --git a/test/notes/clusternote-multicluster-subject/kubeconfig-org b/test/notes/clusternote-multicluster-subject/kubeconfig-org new file mode 100644 index 00000000..6159602c --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/kubeconfig-org @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443/apis/resourcemanager.miloapis.com/v1alpha1/organizations/cn-mc-test-org/control-plane + name: org-cn-mc-test-org +contexts: +- context: + cluster: org-cn-mc-test-org + user: user-1001 + name: org-cn-mc-test-org +current-context: org-cn-mc-test-org +kind: Config +preferences: {} +users: +- name: user-1001 + user: + token: test-admin-token diff --git a/test/notes/clusternote-multicluster-subject/kubeconfig-project-1 b/test/notes/clusternote-multicluster-subject/kubeconfig-project-1 new file mode 100644 index 00000000..9bf46b62 --- /dev/null +++ b/test/notes/clusternote-multicluster-subject/kubeconfig-project-1 @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443/apis/resourcemanager.miloapis.com/v1alpha1/projects/cn-mc-test-project-1/control-plane + name: project-cn-mc-test-project-1 +contexts: +- context: + cluster: project-cn-mc-test-project-1 + user: user-1001 + name: project-cn-mc-test-project-1 +current-context: 
project-cn-mc-test-project-1 +kind: Config +preferences: {} +users: +- name: user-1001 + user: + token: test-admin-token diff --git a/test/notes/note-multicluster-subject/chainsaw-test.yaml b/test/notes/note-multicluster-subject/chainsaw-test.yaml index d0ce5b97..0bbfe7fd 100644 --- a/test/notes/note-multicluster-subject/chainsaw-test.yaml +++ b/test/notes/note-multicluster-subject/chainsaw-test.yaml @@ -2,8 +2,6 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: note-multicluster-subject - labels: - requires: multicluster spec: description: | Tests that Notes can reference subjects (ConfigMaps) in project control planes. diff --git a/test/notes/note-multicluster-subject/kubeconfig-main b/test/notes/note-multicluster-subject/kubeconfig-main new file mode 100644 index 00000000..b0496e6c --- /dev/null +++ b/test/notes/note-multicluster-subject/kubeconfig-main @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443 + name: milo-test-infra +contexts: +- context: + cluster: milo-test-infra + user: admin + name: milo-test-infra +current-context: milo-test-infra +kind: Config +preferences: {} +users: +- name: admin + user: + token: test-admin-token diff --git a/test/notes/note-multicluster-subject/kubeconfig-org b/test/notes/note-multicluster-subject/kubeconfig-org new file mode 100644 index 00000000..86c9bdb8 --- /dev/null +++ b/test/notes/note-multicluster-subject/kubeconfig-org @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443/apis/resourcemanager.miloapis.com/v1alpha1/organizations/note-mc-test-org/control-plane + name: org-note-mc-test-org +contexts: +- context: + cluster: org-note-mc-test-org + user: user-1001 + name: org-note-mc-test-org +current-context: org-note-mc-test-org +kind: Config +preferences: {} +users: +- name: user-1001 + user: + token: test-admin-token diff --git 
a/test/notes/note-multicluster-subject/kubeconfig-project-1 b/test/notes/note-multicluster-subject/kubeconfig-project-1 new file mode 100644 index 00000000..044a3de1 --- /dev/null +++ b/test/notes/note-multicluster-subject/kubeconfig-project-1 @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443/apis/resourcemanager.miloapis.com/v1alpha1/projects/note-mc-test-project-1/control-plane + name: project-note-mc-test-project-1 +contexts: +- context: + cluster: project-note-mc-test-project-1 + user: user-1001 + name: project-note-mc-test-project-1 +current-context: project-note-mc-test-project-1 +kind: Config +preferences: {} +users: +- name: user-1001 + user: + token: test-admin-token From bc4bf85f5a618f8a3e9cde3b57fc98664e038c26 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Fri, 27 Feb 2026 13:31:21 -0600 Subject: [PATCH 3/7] fix(test): remove Ready condition wait from notes multi-cluster tests The notes controller doesn't watch project control planes, so the Ready condition will never be set for notes created there. The tests now only verify that owner references are set by the webhook, which is the actual functionality being tested. 
Co-Authored-By: Claude Opus 4.5 --- .../chainsaw-test.yaml | 17 +++-------------- .../chainsaw-test.yaml | 18 +++--------------- 2 files changed, 6 insertions(+), 29 deletions(-) diff --git a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml index fd95010c..515264d7 100644 --- a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml +++ b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml @@ -75,20 +75,9 @@ spec: try: - apply: file: test-data/clusternote.yaml - - wait: - apiVersion: notes.miloapis.com/v1alpha1 - kind: ClusterNote - name: test-namespace-clusternote - timeout: 30s - for: - condition: - name: Ready - value: 'True' - - - name: verify-owner-reference - description: Verify owner reference points to Namespace - cluster: project - try: + # Note: We don't wait for Ready condition because the notes controller + # doesn't watch project control planes. Instead, we verify the owner + # reference was set by the webhook. - assert: resource: apiVersion: notes.miloapis.com/v1alpha1 diff --git a/test/notes/note-multicluster-subject/chainsaw-test.yaml b/test/notes/note-multicluster-subject/chainsaw-test.yaml index 0bbfe7fd..0794dc44 100644 --- a/test/notes/note-multicluster-subject/chainsaw-test.yaml +++ b/test/notes/note-multicluster-subject/chainsaw-test.yaml @@ -72,21 +72,9 @@ spec: try: - apply: file: test-data/note.yaml - - wait: - apiVersion: notes.miloapis.com/v1alpha1 - kind: Note - name: test-configmap-note - namespace: default - timeout: 30s - for: - condition: - name: Ready - value: 'True' - - - name: verify-owner-reference - description: Verify owner reference points to ConfigMap - cluster: project - try: + # Note: We don't wait for Ready condition because the notes controller + # doesn't watch project control planes. Instead, we verify the owner + # reference was set by the webhook. 
- assert: resource: apiVersion: notes.miloapis.com/v1alpha1 From 8e5c78108652cc1e02081be23d6bd681287d1e54 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Fri, 27 Feb 2026 14:51:28 -0600 Subject: [PATCH 4/7] chore: remove inline comments from test files Co-Authored-By: Claude Opus 4.5 --- test/notes/clusternote-multicluster-subject/chainsaw-test.yaml | 3 --- test/notes/note-multicluster-subject/chainsaw-test.yaml | 3 --- 2 files changed, 6 deletions(-) diff --git a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml index 515264d7..09053948 100644 --- a/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml +++ b/test/notes/clusternote-multicluster-subject/chainsaw-test.yaml @@ -75,9 +75,6 @@ spec: try: - apply: file: test-data/clusternote.yaml - # Note: We don't wait for Ready condition because the notes controller - # doesn't watch project control planes. Instead, we verify the owner - # reference was set by the webhook. - assert: resource: apiVersion: notes.miloapis.com/v1alpha1 diff --git a/test/notes/note-multicluster-subject/chainsaw-test.yaml b/test/notes/note-multicluster-subject/chainsaw-test.yaml index 0794dc44..2072c1c9 100644 --- a/test/notes/note-multicluster-subject/chainsaw-test.yaml +++ b/test/notes/note-multicluster-subject/chainsaw-test.yaml @@ -72,9 +72,6 @@ spec: try: - apply: file: test-data/note.yaml - # Note: We don't wait for Ready condition because the notes controller - # doesn't watch project control planes. Instead, we verify the owner - # reference was set by the webhook. 
- assert: resource: apiVersion: notes.miloapis.com/v1alpha1 From b8b4400a0307c7aa05555adce04afb0150326876 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Fri, 27 Feb 2026 14:58:52 -0600 Subject: [PATCH 5/7] refactor(notes): simplify multicluster webhook to use project context Instead of iterating all clusters to find subject resources, the notes webhooks now use the project context from UserInfo.Extra to determine the specific project control plane to query. Changes: - Remove ClusterGetter interface and MultiClusterGetter implementation - Remove ListEngagedClusters() from provider (no longer needed) - Use UserInfo.Extra[ParentKindExtraKey/ParentNameExtraKey] to detect project context and get the specific project control plane client - Pass mcmanager.Manager directly to webhooks instead of ClusterGetter This is more efficient and semantically correct - we only need to check the control plane where the note is being created. Co-Authored-By: Claude Opus 4.5 --- .../controller-manager/controllermanager.go | 15 ++-- .../notes/v1alpha1/clusternote_webhook.go | 76 ++++++++---------- .../v1alpha1/clusternote_webhook_test.go | 8 +- .../webhooks/notes/v1alpha1/multicluster.go | 39 ---------- .../webhooks/notes/v1alpha1/note_webhook.go | 78 ++++++++----------- .../notes/v1alpha1/note_webhook_test.go | 8 +- pkg/multicluster-runtime/milo/provider.go | 13 ---- 7 files changed, 80 insertions(+), 157 deletions(-) delete mode 100644 internal/webhooks/notes/v1alpha1/multicluster.go diff --git a/cmd/milo/controller-manager/controllermanager.go b/cmd/milo/controller-manager/controllermanager.go index d5b52cf7..b36cb33e 100644 --- a/cmd/milo/controller-manager/controllermanager.go +++ b/cmd/milo/controller-manager/controllermanager.go @@ -553,9 +553,8 @@ func Run(ctx context.Context, c *config.CompletedConfig, opts *Options) error { logger.Error(err, "Error setting up platform access rejection webhook") klog.FlushAndExit(klog.ExitFlushTimeout, 1) } - // Note webhooks are set up later 
after the multicluster provider is created, - // so they can share it for cross-cluster subject lookups. - // See the noteClusterGetter setup after the quota multicluster manager. + // Note webhooks are set up later after the multicluster manager is created, + // so they can use it for project control plane lookups. projectCtrl := resourcemanagercontroller.ProjectController{ ControlPlaneClient: ctrl.GetClient(), @@ -661,16 +660,12 @@ func Run(ctx context.Context, c *config.CompletedConfig, opts *Options) error { } logger.Info("Local cluster engaged successfully") - // Set up note webhooks using the shared multicluster provider for cross-cluster subject lookups - noteClusterGetter := ¬esv1alpha1webhook.MultiClusterGetter{ - Manager: mcMgr, - Provider: provider, - } - if err := notesv1alpha1webhook.SetupNoteWebhooksWithManager(ctrl, noteClusterGetter); err != nil { + // Set up note webhooks with multicluster manager for project control plane lookups + if err := notesv1alpha1webhook.SetupNoteWebhooksWithManager(ctrl, mcMgr); err != nil { logger.Error(err, "Error setting up note webhook") klog.FlushAndExit(klog.ExitFlushTimeout, 1) } - if err := notesv1alpha1webhook.SetupClusterNoteWebhooksWithManager(ctrl, noteClusterGetter); err != nil { + if err := notesv1alpha1webhook.SetupClusterNoteWebhooksWithManager(ctrl, mcMgr); err != nil { logger.Error(err, "Error setting up clusternote webhook") klog.FlushAndExit(klog.ExitFlushTimeout, 1) } diff --git a/internal/webhooks/notes/v1alpha1/clusternote_webhook.go b/internal/webhooks/notes/v1alpha1/clusternote_webhook.go index 2097efd6..d294a4e7 100644 --- a/internal/webhooks/notes/v1alpha1/clusternote_webhook.go +++ b/internal/webhooks/notes/v1alpha1/clusternote_webhook.go @@ -19,19 +19,20 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" ) var 
clusterNoteLog = logf.Log.WithName("clusternote-resource") -func SetupClusterNoteWebhooksWithManager(mgr ctrl.Manager, clusters ClusterGetter) error { +func SetupClusterNoteWebhooksWithManager(mgr ctrl.Manager, mcMgr mcmanager.Manager) error { clusterNoteLog.Info("Setting up notes.miloapis.com clusternote webhooks") return ctrl.NewWebhookManagedBy(mgr). For(¬esv1alpha1.ClusterNote{}). WithDefaulter(&ClusterNoteMutator{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - RESTMapper: mgr.GetRESTMapper(), - Clusters: clusters, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + RESTMapper: mgr.GetRESTMapper(), + ClusterManager: mcMgr, }). WithValidator(&ClusterNoteValidator{ Client: mgr.GetClient(), @@ -42,10 +43,10 @@ func SetupClusterNoteWebhooksWithManager(mgr ctrl.Manager, clusters ClusterGette // +kubebuilder:webhook:path=/mutate-notes-miloapis-com-v1alpha1-clusternote,mutating=true,failurePolicy=fail,sideEffects=None,groups=notes.miloapis.com,resources=clusternotes,verbs=create,versions=v1alpha1,name=mclusternote.notes.miloapis.com,admissionReviewVersions={v1,v1beta1},serviceName=milo-controller-manager,servicePort=9443,serviceNamespace=milo-system type ClusterNoteMutator struct { - Client client.Client - Scheme *runtime.Scheme - RESTMapper meta.RESTMapper - Clusters ClusterGetter // For multi-cluster lookups + Client client.Client + Scheme *runtime.Scheme + RESTMapper meta.RESTMapper + ClusterManager mcmanager.Manager } var _ admission.CustomDefaulter = &ClusterNoteMutator{} @@ -72,8 +73,7 @@ func (m *ClusterNoteMutator) Default(ctx context.Context, obj runtime.Object) er } // Set owner reference to the subject resource for automatic garbage collection - // This is critical for the ClusterNote to be garbage collected when the subject is deleted - if err := m.setSubjectOwnerReference(ctx, clusterNote); err != nil { + if err := m.setSubjectOwnerReference(ctx, clusterNote, req); err != nil { clusterNoteLog.Error(err, "Failed to set owner reference to 
subject", "clusternote", clusterNote.Name) return errors.NewInternalError(fmt.Errorf("failed to set owner reference to subject: %w", err)) } @@ -82,7 +82,7 @@ func (m *ClusterNoteMutator) Default(ctx context.Context, obj runtime.Object) er } // setSubjectOwnerReference sets the owner reference to the subject resource if it's cluster-scoped -func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clusterNote *notesv1alpha1.ClusterNote) error { +func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clusterNote *notesv1alpha1.ClusterNote, req admission.Request) error { // ClusterNote can only have owner references to other cluster-scoped resources if clusterNote.Spec.SubjectRef.Namespace != "" { return nil // Subject is namespaced, can't set owner reference on cluster-scoped resource @@ -103,44 +103,34 @@ func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clust Name: clusterNote.Spec.SubjectRef.Name, } - // If no multi-cluster support, fall back to local client only - if m.Clusters == nil { - subject := &unstructured.Unstructured{} - subject.SetGroupVersionKind(mapping.GroupVersionKind) - if err := m.Client.Get(ctx, key, subject); err != nil { - if errors.IsNotFound(err) { - return fmt.Errorf("subject resource not found: %w", err) + // Determine which client to use based on project context + subjectClient := m.Client + + // Check if this is a project control plane request + if m.ClusterManager != nil { + if parentKinds, ok := req.UserInfo.Extra[iamv1alpha1.ParentKindExtraKey]; ok && len(parentKinds) > 0 && parentKinds[0] == "Project" { + if parentNames, ok := req.UserInfo.Extra[iamv1alpha1.ParentNameExtraKey]; ok && len(parentNames) > 0 { + projectName := parentNames[0] + cluster, err := m.ClusterManager.GetCluster(ctx, projectName) + if err != nil { + return fmt.Errorf("failed to get project control plane %s: %w", projectName, err) + } + subjectClient = cluster.GetClient() + 
clusterNoteLog.V(1).Info("Using project control plane client", "project", projectName) } - return fmt.Errorf("failed to get subject resource: %w", err) } - return controllerutil.SetOwnerReference(subject, clusterNote, m.Scheme) } - // Search all clusters (local first, then project control planes) - for _, clusterName := range m.Clusters.ListClusterNames() { - cluster, err := m.Clusters.GetCluster(ctx, clusterName) - if err != nil { - clusterNoteLog.V(1).Info("Failed to get cluster", "cluster", clusterName, "error", err) - continue - } - - subject := &unstructured.Unstructured{} - subject.SetGroupVersionKind(mapping.GroupVersionKind) - - if err := cluster.GetClient().Get(ctx, key, subject); err == nil { - if clusterName != "" { - clusterNoteLog.Info("Found subject in project control plane", - "cluster", clusterName, - "subject", key) - } - return controllerutil.SetOwnerReference(subject, clusterNote, m.Scheme) + subject := &unstructured.Unstructured{} + subject.SetGroupVersionKind(mapping.GroupVersionKind) + if err := subjectClient.Get(ctx, key, subject); err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("subject resource not found: %w", err) } + return fmt.Errorf("failed to get subject resource: %w", err) } - return fmt.Errorf("subject resource not found in any cluster: %s/%s %s", - clusterNote.Spec.SubjectRef.APIGroup, - clusterNote.Spec.SubjectRef.Kind, - clusterNote.Spec.SubjectRef.Name) + return controllerutil.SetOwnerReference(subject, clusterNote, m.Scheme) } // +kubebuilder:webhook:path=/validate-notes-miloapis-com-v1alpha1-clusternote,mutating=false,failurePolicy=fail,sideEffects=None,groups=notes.miloapis.com,resources=clusternotes,verbs=create;update,versions=v1alpha1,name=vclusternote.notes.miloapis.com,admissionReviewVersions={v1,v1beta1},serviceName=milo-controller-manager,servicePort=9443,serviceNamespace=milo-system diff --git a/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go 
b/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go index e3a0b393..a69777f5 100644 --- a/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go +++ b/internal/webhooks/notes/v1alpha1/clusternote_webhook_test.go @@ -126,10 +126,10 @@ func TestClusterNoteMutator_Default(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(testScheme).WithObjects(objects...).Build() mutator := &ClusterNoteMutator{ - Client: fakeClient, - Scheme: testScheme, - RESTMapper: newTestRESTMapper(), - Clusters: nil, // nil means local-only search (backwards compatible) + Client: fakeClient, + Scheme: testScheme, + RESTMapper: newTestRESTMapper(), + ClusterManager: nil, // nil means local-only search (backwards compatible) } // Create admission request context diff --git a/internal/webhooks/notes/v1alpha1/multicluster.go b/internal/webhooks/notes/v1alpha1/multicluster.go deleted file mode 100644 index d68140b3..00000000 --- a/internal/webhooks/notes/v1alpha1/multicluster.go +++ /dev/null @@ -1,39 +0,0 @@ -package v1alpha1 - -import ( - "context" - - miloprovider "go.miloapis.com/milo/pkg/multicluster-runtime/milo" - "sigs.k8s.io/controller-runtime/pkg/cluster" - mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" -) - -// ClusterGetter provides access to clusters in the multicluster runtime. -type ClusterGetter interface { - // GetCluster returns a cluster by name. Empty string returns the local cluster. - GetCluster(ctx context.Context, name string) (cluster.Cluster, error) - // ListClusterNames returns names of all engaged clusters including "" for local. - ListClusterNames() []string -} - -// MultiClusterGetter implements ClusterGetter using mcmanager and provider. -type MultiClusterGetter struct { - Manager mcmanager.Manager - Provider *miloprovider.Provider -} - -// GetCluster returns a cluster by name. Empty string returns the local cluster. 
-func (g *MultiClusterGetter) GetCluster(ctx context.Context, name string) (cluster.Cluster, error) { - return g.Manager.GetCluster(ctx, name) -} - -// ListClusterNames returns names of all engaged clusters including "" for local. -func (g *MultiClusterGetter) ListClusterNames() []string { - // Start with local cluster (empty string) - names := []string{""} - // Add all project control planes - if g.Provider != nil { - names = append(names, g.Provider.ListEngagedClusters()...) - } - return names -} diff --git a/internal/webhooks/notes/v1alpha1/note_webhook.go b/internal/webhooks/notes/v1alpha1/note_webhook.go index 1b523627..41b964a7 100644 --- a/internal/webhooks/notes/v1alpha1/note_webhook.go +++ b/internal/webhooks/notes/v1alpha1/note_webhook.go @@ -19,19 +19,20 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" ) var noteLog = logf.Log.WithName("note-resource") -func SetupNoteWebhooksWithManager(mgr ctrl.Manager, clusters ClusterGetter) error { +func SetupNoteWebhooksWithManager(mgr ctrl.Manager, mcMgr mcmanager.Manager) error { noteLog.Info("Setting up notes.miloapis.com note webhooks") return ctrl.NewWebhookManagedBy(mgr). For(¬esv1alpha1.Note{}). WithDefaulter(&NoteMutator{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - RESTMapper: mgr.GetRESTMapper(), - Clusters: clusters, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + RESTMapper: mgr.GetRESTMapper(), + ClusterManager: mcMgr, }). 
WithValidator(&NoteValidator{ Client: mgr.GetClient(), @@ -42,10 +43,10 @@ func SetupNoteWebhooksWithManager(mgr ctrl.Manager, clusters ClusterGetter) erro // +kubebuilder:webhook:path=/mutate-notes-miloapis-com-v1alpha1-note,mutating=true,failurePolicy=fail,sideEffects=None,groups=notes.miloapis.com,resources=notes,verbs=create,versions=v1alpha1,name=mnote.notes.miloapis.com,admissionReviewVersions={v1,v1beta1},serviceName=milo-controller-manager,servicePort=9443,serviceNamespace=milo-system type NoteMutator struct { - Client client.Client - Scheme *runtime.Scheme - RESTMapper meta.RESTMapper - Clusters ClusterGetter // For multi-cluster lookups + Client client.Client + Scheme *runtime.Scheme + RESTMapper meta.RESTMapper + ClusterManager mcmanager.Manager } var _ admission.CustomDefaulter = &NoteMutator{} @@ -72,8 +73,7 @@ func (m *NoteMutator) Default(ctx context.Context, obj runtime.Object) error { } // Set owner reference to the subject resource for automatic garbage collection - // This is critical for the Note to be garbage collected when the subject is deleted - if err := m.setSubjectOwnerReference(ctx, note); err != nil { + if err := m.setSubjectOwnerReference(ctx, note, req); err != nil { noteLog.Error(err, "Failed to set owner reference to subject", "note", note.Name) return errors.NewInternalError(fmt.Errorf("failed to set owner reference to subject: %w", err)) } @@ -82,13 +82,13 @@ func (m *NoteMutator) Default(ctx context.Context, obj runtime.Object) error { } // setSubjectOwnerReference sets the owner reference to the subject resource if it's in the same namespace -func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv1alpha1.Note) error { +func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv1alpha1.Note, req admission.Request) error { // Only set owner reference if the subject is in the same namespace if note.Spec.SubjectRef.Namespace == "" || note.Spec.SubjectRef.Namespace != note.Namespace { 
return nil } - // Resolve the GVK using REST mapper to discover the correct API version + // Resolve the GVK using REST mapper groupKind := schema.GroupKind{ Group: note.Spec.SubjectRef.APIGroup, Kind: note.Spec.SubjectRef.Kind, @@ -104,44 +104,34 @@ func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv Namespace: note.Spec.SubjectRef.Namespace, } - // If no multi-cluster support, fall back to local client only - if m.Clusters == nil { - subject := &unstructured.Unstructured{} - subject.SetGroupVersionKind(mapping.GroupVersionKind) - if err := m.Client.Get(ctx, key, subject); err != nil { - if errors.IsNotFound(err) { - return fmt.Errorf("subject resource not found: %w", err) + // Determine which client to use based on project context + subjectClient := m.Client + + // Check if this is a project control plane request + if m.ClusterManager != nil { + if parentKinds, ok := req.UserInfo.Extra[iamv1alpha1.ParentKindExtraKey]; ok && len(parentKinds) > 0 && parentKinds[0] == "Project" { + if parentNames, ok := req.UserInfo.Extra[iamv1alpha1.ParentNameExtraKey]; ok && len(parentNames) > 0 { + projectName := parentNames[0] + cluster, err := m.ClusterManager.GetCluster(ctx, projectName) + if err != nil { + return fmt.Errorf("failed to get project control plane %s: %w", projectName, err) + } + subjectClient = cluster.GetClient() + noteLog.V(1).Info("Using project control plane client", "project", projectName) } - return fmt.Errorf("failed to get subject resource: %w", err) } - return controllerutil.SetOwnerReference(subject, note, m.Scheme) } - // Search all clusters (local first, then project control planes) - for _, clusterName := range m.Clusters.ListClusterNames() { - cluster, err := m.Clusters.GetCluster(ctx, clusterName) - if err != nil { - noteLog.V(1).Info("Failed to get cluster", "cluster", clusterName, "error", err) - continue - } - - subject := &unstructured.Unstructured{} - subject.SetGroupVersionKind(mapping.GroupVersionKind) - - if 
err := cluster.GetClient().Get(ctx, key, subject); err == nil { - if clusterName != "" { - noteLog.Info("Found subject in project control plane", - "cluster", clusterName, - "subject", key) - } - return controllerutil.SetOwnerReference(subject, note, m.Scheme) + subject := &unstructured.Unstructured{} + subject.SetGroupVersionKind(mapping.GroupVersionKind) + if err := subjectClient.Get(ctx, key, subject); err != nil { + if errors.IsNotFound(err) { + return fmt.Errorf("subject resource not found: %w", err) } + return fmt.Errorf("failed to get subject resource: %w", err) } - return fmt.Errorf("subject resource not found in any cluster: %s/%s %s", - note.Spec.SubjectRef.APIGroup, - note.Spec.SubjectRef.Kind, - note.Spec.SubjectRef.Name) + return controllerutil.SetOwnerReference(subject, note, m.Scheme) } // +kubebuilder:webhook:path=/validate-notes-miloapis-com-v1alpha1-note,mutating=false,failurePolicy=fail,sideEffects=None,groups=notes.miloapis.com,resources=notes,verbs=create;update,versions=v1alpha1,name=vnote.notes.miloapis.com,admissionReviewVersions={v1,v1beta1},serviceName=milo-controller-manager,servicePort=9443,serviceNamespace=milo-system diff --git a/internal/webhooks/notes/v1alpha1/note_webhook_test.go b/internal/webhooks/notes/v1alpha1/note_webhook_test.go index d6ffb9bb..08746ef9 100644 --- a/internal/webhooks/notes/v1alpha1/note_webhook_test.go +++ b/internal/webhooks/notes/v1alpha1/note_webhook_test.go @@ -160,10 +160,10 @@ func TestNoteMutator_Default(t *testing.T) { fakeClient := fake.NewClientBuilder().WithScheme(testScheme).WithObjects(objects...).Build() mutator := &NoteMutator{ - Client: fakeClient, - Scheme: testScheme, - RESTMapper: newTestRESTMapper(), - Clusters: nil, // nil means local-only search (backwards compatible) + Client: fakeClient, + Scheme: testScheme, + RESTMapper: newTestRESTMapper(), + ClusterManager: nil, // nil means local-only search (backwards compatible) } // Create admission request context diff --git 
a/pkg/multicluster-runtime/milo/provider.go b/pkg/multicluster-runtime/milo/provider.go index bfe0e4c0..2055d331 100644 --- a/pkg/multicluster-runtime/milo/provider.go +++ b/pkg/multicluster-runtime/milo/provider.go @@ -298,19 +298,6 @@ func (p *Provider) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result return ctrl.Result{}, nil } -// ListEngagedClusters returns the names of all currently engaged project clusters. -// Note: This does not include the local cluster (empty string key). -func (p *Provider) ListEngagedClusters() []string { - p.lock.Lock() - defer p.lock.Unlock() - - names := make([]string, 0, len(p.projects)) - for name := range p.projects { - names = append(names, name) - } - return names -} - func (p *Provider) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { p.lock.Lock() defer p.lock.Unlock() From f19b32de47fedc6a88b7cc3e9f191b1b13f32a42 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Fri, 27 Feb 2026 15:19:21 -0600 Subject: [PATCH 6/7] fix(multicluster): use project name as cluster key for webhook lookups The provider was using req.String() (namespace/name format) as the cluster key, but the webhook only has access to the project name from ParentNameExtraKey. This caused "cluster not found" errors when the webhook tried to look up the project control plane. Changed the provider to use just req.Name as the key, which matches: - The project name in URL paths (/projects/{name}/control-plane/...) 
- The project name in ParentNameExtraKey Co-Authored-By: Claude Opus 4.5 --- pkg/multicluster-runtime/milo/provider.go | 4 +++- pkg/multicluster-runtime/milo/provider_test.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/multicluster-runtime/milo/provider.go b/pkg/multicluster-runtime/milo/provider.go index 2055d331..e71bbd2a 100644 --- a/pkg/multicluster-runtime/milo/provider.go +++ b/pkg/multicluster-runtime/milo/provider.go @@ -156,7 +156,9 @@ func (p *Provider) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result log := p.log.WithValues("project", req.Name) log.Info("Reconciling Project") - key := req.String() + // Use just the project name as the key for cluster lookup. + // This matches the project name used in URL paths and ParentNameExtraKey. + key := req.Name var project unstructured.Unstructured if p.opts.InternalServiceDiscovery { diff --git a/pkg/multicluster-runtime/milo/provider_test.go b/pkg/multicluster-runtime/milo/provider_test.go index 449c0158..7f1283f3 100644 --- a/pkg/multicluster-runtime/milo/provider_test.go +++ b/pkg/multicluster-runtime/milo/provider_test.go @@ -61,7 +61,7 @@ func TestReadyProject(t *testing.T) { assert.Zero(t, result.RequeueAfter) assert.Len(t, provider.projects, 1) - cl, err := provider.Get(context.Background(), "/test-project") + cl, err := provider.Get(context.Background(), "test-project") assert.NoError(t, err) apiHost, err := url.Parse(cl.GetConfig().Host) assert.NoError(t, err) From f54cee1031a0cfa6c2f7fbeab6f451a13f9397a5 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Fri, 27 Feb 2026 15:52:12 -0600 Subject: [PATCH 7/7] refactor(webhook): add ClusterAwareServer wrapper for multi-cluster context Introduce a reusable ClusterAwareServer wrapper that automatically injects project control plane context from UserInfo.Extra into the request context. This allows webhook handlers to use mccontext.ClusterFrom(ctx) to determine which project control plane the request is targeting. 
This pattern matches the approach used by external services like the network-services-operator and provides a cleaner separation of concerns. Changes: - Add pkg/webhook/cluster_aware_server.go as an exported library - Update note and clusternote webhooks to use mccontext.ClusterFrom(ctx) - Wrap the webhook server in controller manager with ClusterAwareServer Co-Authored-By: Claude Opus 4.5 --- .../controller-manager/controllermanager.go | 9 ++- .../notes/v1alpha1/clusternote_webhook.go | 26 +++---- .../webhooks/notes/v1alpha1/note_webhook.go | 26 +++---- pkg/webhook/cluster_aware_server.go | 75 +++++++++++++++++++ 4 files changed, 106 insertions(+), 30 deletions(-) create mode 100644 pkg/webhook/cluster_aware_server.go diff --git a/cmd/milo/controller-manager/controllermanager.go b/cmd/milo/controller-manager/controllermanager.go index b36cb33e..20793e0f 100644 --- a/cmd/milo/controller-manager/controllermanager.go +++ b/cmd/milo/controller-manager/controllermanager.go @@ -98,6 +98,7 @@ import ( quotav1alpha1 "go.miloapis.com/milo/pkg/apis/quota/v1alpha1" resourcemanagerv1alpha1 "go.miloapis.com/milo/pkg/apis/resourcemanager/v1alpha1" miloprovider "go.miloapis.com/milo/pkg/multicluster-runtime/milo" + milowebhook "go.miloapis.com/milo/pkg/webhook" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" ) @@ -471,7 +472,11 @@ func Run(ctx context.Context, c *config.CompletedConfig, opts *Options) error { MapperProvider: func(c *restclient.Config, httpClient *http.Client) (meta.RESTMapper, error) { return controllerContext.RESTMapper, nil }, - WebhookServer: webhook.NewServer(webhook.Options{ + // Wrap the webhook server with ClusterAwareServer to automatically inject + // project control plane context from UserInfo.Extra into the request context. + // This allows webhook handlers to use mccontext.ClusterFrom(ctx) to determine + // which project control plane the request is targeting. 
+ WebhookServer: milowebhook.NewClusterAwareServer(webhook.NewServer(webhook.Options{ Port: opts.ControllerRuntimeWebhookPort, CertDir: opts.SecureServing.ServerCert.CertDirectory, // The webhook server expects the key and cert files to be in the @@ -481,7 +486,7 @@ func Run(ctx context.Context, c *config.CompletedConfig, opts *Options) error { // key and cert file names. KeyName: strings.TrimPrefix(opts.SecureServing.ServerCert.CertKey.KeyFile, opts.SecureServing.ServerCert.CertDirectory+"/"), CertName: strings.TrimPrefix(opts.SecureServing.ServerCert.CertKey.CertFile, opts.SecureServing.ServerCert.CertDirectory+"/"), - }), + })), }, ) if err != nil { diff --git a/internal/webhooks/notes/v1alpha1/clusternote_webhook.go b/internal/webhooks/notes/v1alpha1/clusternote_webhook.go index d294a4e7..689b3d64 100644 --- a/internal/webhooks/notes/v1alpha1/clusternote_webhook.go +++ b/internal/webhooks/notes/v1alpha1/clusternote_webhook.go @@ -19,6 +19,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mccontext "sigs.k8s.io/multicluster-runtime/pkg/context" mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" ) @@ -73,7 +74,7 @@ func (m *ClusterNoteMutator) Default(ctx context.Context, obj runtime.Object) er } // Set owner reference to the subject resource for automatic garbage collection - if err := m.setSubjectOwnerReference(ctx, clusterNote, req); err != nil { + if err := m.setSubjectOwnerReference(ctx, clusterNote); err != nil { clusterNoteLog.Error(err, "Failed to set owner reference to subject", "clusternote", clusterNote.Name) return errors.NewInternalError(fmt.Errorf("failed to set owner reference to subject: %w", err)) } @@ -81,8 +82,9 @@ func (m *ClusterNoteMutator) Default(ctx context.Context, obj runtime.Object) er return nil } -// setSubjectOwnerReference sets the owner reference to the subject resource if it's cluster-scoped 
-func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clusterNote *notesv1alpha1.ClusterNote, req admission.Request) error { +// setSubjectOwnerReference sets the owner reference to the subject resource if it's cluster-scoped. +// The cluster context is expected to be injected by the ClusterAwareServer wrapper. +func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clusterNote *notesv1alpha1.ClusterNote) error { // ClusterNote can only have owner references to other cluster-scoped resources if clusterNote.Spec.SubjectRef.Namespace != "" { return nil // Subject is namespaced, can't set owner reference on cluster-scoped resource @@ -103,21 +105,17 @@ func (m *ClusterNoteMutator) setSubjectOwnerReference(ctx context.Context, clust Name: clusterNote.Spec.SubjectRef.Name, } - // Determine which client to use based on project context + // Determine which client to use based on cluster context (injected by ClusterAwareServer) subjectClient := m.Client - // Check if this is a project control plane request if m.ClusterManager != nil { - if parentKinds, ok := req.UserInfo.Extra[iamv1alpha1.ParentKindExtraKey]; ok && len(parentKinds) > 0 && parentKinds[0] == "Project" { - if parentNames, ok := req.UserInfo.Extra[iamv1alpha1.ParentNameExtraKey]; ok && len(parentNames) > 0 { - projectName := parentNames[0] - cluster, err := m.ClusterManager.GetCluster(ctx, projectName) - if err != nil { - return fmt.Errorf("failed to get project control plane %s: %w", projectName, err) - } - subjectClient = cluster.GetClient() - clusterNoteLog.V(1).Info("Using project control plane client", "project", projectName) + if clusterName, ok := mccontext.ClusterFrom(ctx); ok && clusterName != "" { + cluster, err := m.ClusterManager.GetCluster(ctx, clusterName) + if err != nil { + return fmt.Errorf("failed to get project control plane %s: %w", clusterName, err) } + subjectClient = cluster.GetClient() + clusterNoteLog.V(1).Info("Using project control plane 
client", "cluster", clusterName) } } diff --git a/internal/webhooks/notes/v1alpha1/note_webhook.go b/internal/webhooks/notes/v1alpha1/note_webhook.go index 41b964a7..c0ce545f 100644 --- a/internal/webhooks/notes/v1alpha1/note_webhook.go +++ b/internal/webhooks/notes/v1alpha1/note_webhook.go @@ -19,6 +19,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mccontext "sigs.k8s.io/multicluster-runtime/pkg/context" mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" ) @@ -73,7 +74,7 @@ func (m *NoteMutator) Default(ctx context.Context, obj runtime.Object) error { } // Set owner reference to the subject resource for automatic garbage collection - if err := m.setSubjectOwnerReference(ctx, note, req); err != nil { + if err := m.setSubjectOwnerReference(ctx, note); err != nil { noteLog.Error(err, "Failed to set owner reference to subject", "note", note.Name) return errors.NewInternalError(fmt.Errorf("failed to set owner reference to subject: %w", err)) } @@ -81,8 +82,9 @@ func (m *NoteMutator) Default(ctx context.Context, obj runtime.Object) error { return nil } -// setSubjectOwnerReference sets the owner reference to the subject resource if it's in the same namespace -func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv1alpha1.Note, req admission.Request) error { +// setSubjectOwnerReference sets the owner reference to the subject resource if it's in the same namespace. +// The cluster context is expected to be injected by the ClusterAwareServer wrapper. 
+func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv1alpha1.Note) error { // Only set owner reference if the subject is in the same namespace if note.Spec.SubjectRef.Namespace == "" || note.Spec.SubjectRef.Namespace != note.Namespace { return nil @@ -104,21 +106,17 @@ func (m *NoteMutator) setSubjectOwnerReference(ctx context.Context, note *notesv Namespace: note.Spec.SubjectRef.Namespace, } - // Determine which client to use based on project context + // Determine which client to use based on cluster context (injected by ClusterAwareServer) subjectClient := m.Client - // Check if this is a project control plane request if m.ClusterManager != nil { - if parentKinds, ok := req.UserInfo.Extra[iamv1alpha1.ParentKindExtraKey]; ok && len(parentKinds) > 0 && parentKinds[0] == "Project" { - if parentNames, ok := req.UserInfo.Extra[iamv1alpha1.ParentNameExtraKey]; ok && len(parentNames) > 0 { - projectName := parentNames[0] - cluster, err := m.ClusterManager.GetCluster(ctx, projectName) - if err != nil { - return fmt.Errorf("failed to get project control plane %s: %w", projectName, err) - } - subjectClient = cluster.GetClient() - noteLog.V(1).Info("Using project control plane client", "project", projectName) + if clusterName, ok := mccontext.ClusterFrom(ctx); ok && clusterName != "" { + cluster, err := m.ClusterManager.GetCluster(ctx, clusterName) + if err != nil { + return fmt.Errorf("failed to get project control plane %s: %w", clusterName, err) } + subjectClient = cluster.GetClient() + noteLog.V(1).Info("Using project control plane client", "cluster", clusterName) } } diff --git a/pkg/webhook/cluster_aware_server.go b/pkg/webhook/cluster_aware_server.go new file mode 100644 index 00000000..3b7c3c11 --- /dev/null +++ b/pkg/webhook/cluster_aware_server.go @@ -0,0 +1,75 @@ +// Package webhook provides multi-cluster aware webhook utilities for services +// that integrate with Milo's project control plane architecture. 
+package webhook + +import ( + "context" + "net/http" + + iamv1alpha1 "go.miloapis.com/milo/pkg/apis/iam/v1alpha1" + authv1 "k8s.io/api/authentication/v1" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mccontext "sigs.k8s.io/multicluster-runtime/pkg/context" +) + +// ClusterAwareServer wraps a webhook.Server to automatically inject the cluster +// name from the request's UserInfo.Extra into the context. This allows webhook +// handlers to use mccontext.ClusterFrom(ctx) to determine which project control +// plane the request is targeting. +// +// The cluster name is extracted from the "iam.miloapis.com/parent-name" extra +// field, which is set by Milo's API server when requests target a project +// control plane via the aggregated API path. +type ClusterAwareServer struct { + webhook.Server +} + +var _ webhook.Server = &ClusterAwareServer{} + +// NewClusterAwareServer wraps a webhook.Server to inject cluster context from +// the request's UserInfo.Extra fields into the handler context. +// +// Example usage: +// +// webhookServer := webhook.NewServer(webhook.Options{...}) +// webhookServer = milowebhook.NewClusterAwareServer(webhookServer) +// mgr.Add(webhookServer) +func NewClusterAwareServer(server webhook.Server) *ClusterAwareServer { + return &ClusterAwareServer{ + Server: server, + } +} + +// Register wraps the webhook handler to inject cluster context before calling +// the original handler. 
+func (s *ClusterAwareServer) Register(path string, hook http.Handler) { + if h, ok := hook.(*admission.Webhook); ok { + orig := h.Handler + h.Handler = admission.HandlerFunc(func(ctx context.Context, req admission.Request) admission.Response { + clusterName := clusterNameFromExtra(req.UserInfo.Extra) + if clusterName != "" { + ctx = mccontext.WithCluster(ctx, clusterName) + } + return orig.Handle(ctx, req) + }) + } + + s.Server.Register(path, hook) +} + +// clusterNameFromExtra extracts the cluster/project name from the UserInfo.Extra +// fields. Returns empty string if not in a project context. +func clusterNameFromExtra(extra map[string]authv1.ExtraValue) string { + // Check if this is a project context + if parentKinds, ok := extra[iamv1alpha1.ParentKindExtraKey]; !ok || len(parentKinds) == 0 || parentKinds[0] != "Project" { + return "" + } + + // Extract the project name + if parentNames, ok := extra[iamv1alpha1.ParentNameExtraKey]; ok && len(parentNames) > 0 { + return parentNames[0] + } + + return "" +}