diff --git a/chart/seeder/templates/rbac.yaml b/chart/seeder/templates/rbac.yaml index 77257d3..6ff2298 100644 --- a/chart/seeder/templates/rbac.yaml +++ b/chart/seeder/templates/rbac.yaml @@ -92,6 +92,7 @@ rules: - admissionregistration.k8s.io resources: - validatingwebhookconfigurations + - mutatingwebhookconfigurations verbs: - get - watch diff --git a/pkg/api/v1alpha1/common.go b/pkg/api/v1alpha1/common.go index 59f2411..f22c861 100644 --- a/pkg/api/v1alpha1/common.go +++ b/pkg/api/v1alpha1/common.go @@ -28,4 +28,7 @@ const ( DefaultEndpointPort = 9090 DefaultSeederDeploymentService = "harvester-seeder-endpoint" DefaultHegelDeploymentEndpointLookup = "smee" + ClusterOwnerKey = "harvesterhci.io/clusterOwner" + ClusterOwnerDetailsKey = "harvesterhci.io/clusterOwnerDetails" + ExtraFieldKey = "username" ) diff --git a/pkg/controllers/cluster_controller.go b/pkg/controllers/cluster_controller.go index 835c3c4..3d41908 100644 --- a/pkg/controllers/cluster_controller.go +++ b/pkg/controllers/cluster_controller.go @@ -204,6 +204,10 @@ func (r *ClusterReconciler) patchNodesAndPools(ctx context.Context, cObj *seeder continue } + i, err = r.patchInventoryOwnership(ctx, cObj, i) + if err != nil { + return fmt.Errorf("error updating inventory ownership: %v", err) + } var found bool var nodeAddress string for address, nodeDetails := range pool.Status.AddressAllocation { @@ -775,3 +779,38 @@ func (r *ClusterReconciler) lockedAddressPoolUpdate(ctx context.Context, pool *s defer r.mutex.Unlock() return r.Status().Update(ctx, pool) } + +// patchInventoryOwnership looks up ownership info from cluster object +// and labels the inventory with same info +func (r *ClusterReconciler) patchInventoryOwnership(ctx context.Context, c *seederv1alpha1.Cluster, i *seederv1alpha1.Inventory) (*seederv1alpha1.Inventory, error) { + if c.Labels == nil { + return i, nil + } + + clusterOwner, ok := c.Labels[seederv1alpha1.ClusterOwnerKey] + // no ownership details found on cluster, so no 
further action required + if !ok { + return i, nil + } + + if i.Labels == nil { + i.Labels = make(map[string]string) + } + // apply user info + i.Labels[seederv1alpha1.ClusterOwnerKey] = clusterOwner + + // apply user details + clusterOwnerDetails, ok := c.Labels[seederv1alpha1.ClusterOwnerDetailsKey] + if ok { + i.Labels[seederv1alpha1.ClusterOwnerDetailsKey] = clusterOwnerDetails + } + + err := r.Update(ctx, i) + if err != nil { + return i, fmt.Errorf("error updating inventory with ownership details: %v", err) + } + + iObj := &seederv1alpha1.Inventory{} + err = r.Get(ctx, types.NamespacedName{Name: i.Name, Namespace: i.Namespace}, iObj) + return iObj, err +} diff --git a/pkg/controllers/inventory_controller.go b/pkg/controllers/inventory_controller.go index 8f527f0..25579e0 100644 --- a/pkg/controllers/inventory_controller.go +++ b/pkg/controllers/inventory_controller.go @@ -314,6 +314,13 @@ func (r *InventoryReconciler) reconcileBMCJob(ctx context.Context, iObj *seederv func (r *InventoryReconciler) inventoryFreed(ctx context.Context, iObj *seederv1alpha1.Inventory) error { i := iObj.DeepCopy() if util.ConditionExists(i, seederv1alpha1.InventoryFreed) { + if i.Labels != nil { + if _, ok := i.Labels[seederv1alpha1.ClusterOwnerKey]; ok { + delete(i.Labels, seederv1alpha1.ClusterOwnerKey) + delete(i.Labels, seederv1alpha1.ClusterOwnerDetailsKey) + return r.Update(ctx, i) + } + } j := util.GenerateJob(i.Name, i.Namespace, "shutdown") if err := r.jobWrapper(ctx, i, j); err != nil { return err diff --git a/pkg/data/data.go b/pkg/data/data.go index 67b7bb1..d279022 100644 --- a/pkg/data/data.go +++ b/pkg/data/data.go @@ -101,7 +101,7 @@ func chartSeederCrdTemplatesBmcTinkerbellOrg_baseboardmanagementsYaml() (*asset, return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_baseboardmanagements.yaml", size: 5154, mode: os.FileMode(420), modTime: time.Unix(1688446779, 0)} + info := bindataFileInfo{name: 
"chart/seeder-crd/templates/bmc.tinkerbell.org_baseboardmanagements.yaml", size: 5154, mode: os.FileMode(420), modTime: time.Unix(1761694341, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -121,7 +121,7 @@ func chartSeederCrdTemplatesBmcTinkerbellOrg_bmctasksYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_bmctasks.yaml", size: 6779, mode: os.FileMode(420), modTime: time.Unix(1688446779, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_bmctasks.yaml", size: 6779, mode: os.FileMode(420), modTime: time.Unix(1761694341, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -141,7 +141,7 @@ func chartSeederCrdTemplatesBmcTinkerbellOrg_jobsYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_jobs.yaml", size: 8575, mode: os.FileMode(420), modTime: time.Unix(1761523094, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_jobs.yaml", size: 8575, mode: os.FileMode(420), modTime: time.Unix(1761694598, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -161,7 +161,7 @@ func chartSeederCrdTemplatesBmcTinkerbellOrg_machinesYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_machines.yaml", size: 13020, mode: os.FileMode(420), modTime: time.Unix(1761523094, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_machines.yaml", size: 13020, mode: os.FileMode(420), modTime: time.Unix(1761694598, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -181,7 +181,7 @@ func chartSeederCrdTemplatesBmcTinkerbellOrg_tasksYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_tasks.yaml", size: 6447, mode: os.FileMode(420), modTime: time.Unix(1688446779, 0)} + info := 
bindataFileInfo{name: "chart/seeder-crd/templates/bmc.tinkerbell.org_tasks.yaml", size: 6447, mode: os.FileMode(420), modTime: time.Unix(1761694341, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -201,7 +201,7 @@ func chartSeederCrdTemplatesMetalHarvesterhciIo_addresspoolsYaml() (*asset, erro return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/metal.harvesterhci.io_addresspools.yaml", size: 3227, mode: os.FileMode(420), modTime: time.Unix(1761526191, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/metal.harvesterhci.io_addresspools.yaml", size: 3227, mode: os.FileMode(420), modTime: time.Unix(1761694598, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -221,7 +221,7 @@ func chartSeederCrdTemplatesMetalHarvesterhciIo_clustersYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/metal.harvesterhci.io_clusters.yaml", size: 4750, mode: os.FileMode(420), modTime: time.Unix(1761526191, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/metal.harvesterhci.io_clusters.yaml", size: 4750, mode: os.FileMode(420), modTime: time.Unix(1761694598, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -241,7 +241,7 @@ func chartSeederCrdTemplatesMetalHarvesterhciIo_inventoriesYaml() (*asset, error return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/metal.harvesterhci.io_inventories.yaml", size: 15658, mode: os.FileMode(420), modTime: time.Unix(1761526191, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/metal.harvesterhci.io_inventories.yaml", size: 15658, mode: os.FileMode(420), modTime: time.Unix(1761694598, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -261,7 +261,7 @@ func chartSeederCrdTemplatesTinkerbellOrg_hardwareYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "chart/seeder-crd/templates/tinkerbell.org_hardware.yaml", size: 27602, mode: os.FileMode(420), 
modTime: time.Unix(1688446779, 0)} + info := bindataFileInfo{name: "chart/seeder-crd/templates/tinkerbell.org_hardware.yaml", size: 27602, mode: os.FileMode(420), modTime: time.Unix(1761694341, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/pkg/webhook/cluster_mutator.go b/pkg/webhook/cluster_mutator.go new file mode 100644 index 0000000..f5a7dde --- /dev/null +++ b/pkg/webhook/cluster_mutator.go @@ -0,0 +1,69 @@ +package webhook + +import ( + "context" + "strings" + + werror "github.com/harvester/webhook/pkg/error" + "github.com/harvester/webhook/pkg/server/admission" + admissionregv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" +) + +type ClusterMutator struct { + client client.Client + ctx context.Context + admission.DefaultMutator +} + +func NewClusterMutator(ctx context.Context, mgr manager.Manager) *ClusterMutator { + return &ClusterMutator{ + client: mgr.GetClient(), + ctx: ctx, + } +} + +func (c *ClusterMutator) Resource() admission.Resource { + return admission.Resource{ + Names: []string{"clusters"}, + Scope: admissionregv1.NamespacedScope, + APIGroup: seederv1alpha1.GroupVersion.Group, + APIVersion: seederv1alpha1.GroupVersion.Version, + ObjectType: &seederv1alpha1.Cluster{}, + OperationTypes: []admissionregv1.OperationType{ + admissionregv1.Create, + }, + } +} + +func (c *ClusterMutator) Create(req *admission.Request, newObj runtime.Object) (admission.Patch, error) { + clusterObj, ok := newObj.(*seederv1alpha1.Cluster) + if !ok { + return nil, werror.NewBadRequest("unable to assert object to Cluster Object") + } + var patchOps admission.Patch + userName := req.UserInfo.Username + + // if there is an existing owner reference, then replace it with details in request + labels := clusterObj.GetLabels() + if labels == nil { + labels = 
make(map[string]string) + } + + labels[seederv1alpha1.ClusterOwnerKey] = userName + val, ok := req.UserInfo.Extra[seederv1alpha1.ExtraFieldKey] + if ok { + labels[seederv1alpha1.ClusterOwnerDetailsKey] = strings.Join(val, " ") + } + + patchOps = append(patchOps, admission.PatchOp{ + Op: admission.PatchOpAdd, + Path: "/metadata/labels", + Value: labels, + }) + return patchOps, nil +} diff --git a/pkg/webhook/cluster_mutator_test.go b/pkg/webhook/cluster_mutator_test.go new file mode 100644 index 0000000..a5df0ed --- /dev/null +++ b/pkg/webhook/cluster_mutator_test.go @@ -0,0 +1,87 @@ +package webhook + +import ( + "context" + "encoding/json" + "testing" + + seederv1alpha1 "github.com/harvester/seeder/pkg/api/v1alpha1" + "github.com/harvester/webhook/pkg/server/admission" + "github.com/rancher/wrangler/pkg/webhook" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/admission/v1" + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + clusterObj = &seederv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } + + userName = "dev" + + request = &admission.Request{ + Request: &webhook.Request{ + AdmissionRequest: v1.AdmissionRequest{ + UserInfo: authenticationv1.UserInfo{ + Username: userName, + }, + }, + }, + } +) + +func Test_ClusterMutation(t *testing.T) { + type testCases struct { + name string + generateClusterObject func(*seederv1alpha1.Cluster) *seederv1alpha1.Cluster + } + + var cases = []testCases{ + { + name: "no cluster owner defined", + generateClusterObject: func(cluster *seederv1alpha1.Cluster) *seederv1alpha1.Cluster { return cluster }, + }, + { + name: "cluster object has an owner defined", + generateClusterObject: func(cluster *seederv1alpha1.Cluster) *seederv1alpha1.Cluster { + clusterCopy := 
cluster.DeepCopy() + clusterCopy.Labels = map[string]string{ + seederv1alpha1.ClusterOwnerKey: "demo", + } + return clusterCopy + }, + }, + } + + for _, testCase := range cases { + assert := require.New(t) + scheme := runtime.NewScheme() + err := seederv1alpha1.AddToScheme(scheme) + assert.NoError(err, testCase.name) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(testCase.generateClusterObject(clusterObj)).Build() + c := &ClusterMutator{ + client: fakeClient, + ctx: context.TODO(), + } + ops, err := c.Create(request, clusterObj) + assert.NoError(err, testCase.name) + assert.Len(ops, 1, testCase.name) + // test out by patching the object + opsByte, err := json.Marshal(ops) + assert.NoError(err, testCase.name) + err = c.client.Patch(context.TODO(), clusterObj, client.RawPatch(types.JSONPatchType, opsByte)) + assert.NoError(err, testCase.name) + updatedObj := &seederv1alpha1.Cluster{} + err = c.client.Get(context.TODO(), types.NamespacedName{Name: clusterObj.Name, Namespace: clusterObj.Namespace}, updatedObj) + assert.NoError(err, testCase.name) + assert.Equal(updatedObj.GetLabels()[seederv1alpha1.ClusterOwnerKey], userName, testCase.name) + } +} diff --git a/pkg/webhook/setup.go b/pkg/webhook/setup.go index 8915b1f..94d7558 100644 --- a/pkg/webhook/setup.go +++ b/pkg/webhook/setup.go @@ -30,6 +30,9 @@ func SetupWebhookServer(ctx context.Context, mgr manager.Manager, namespace stri return err } + if err := webhookServer.RegisterMutators(NewClusterMutator(ctx, mgr)); err != nil { + return err + } // since webhook and manager start run as two go routines, need to wait for caches to sync // before starting the server for {