diff --git a/install/0000_00_cluster-version-operator_03_deployment.yaml b/install/0000_00_cluster-version-operator_03_deployment.yaml
index d8a32bd694..ebbdb31f17 100644
--- a/install/0000_00_cluster-version-operator_03_deployment.yaml
+++ b/install/0000_00_cluster-version-operator_03_deployment.yaml
@@ -56,6 +56,9 @@ spec:
           name: kube-api-access
           readOnly: true
         env:
+        # Unfortunately the placeholder is not replaced, reported as OCPBUGS-30080
+        - name: OPERATOR_IMAGE_VERSION
+          value: "0.0.1-snapshot"
         - name: KUBERNETES_SERVICE_PORT # allows CVO to communicate with apiserver directly on same host. Is substituted with port from infrastructures.status.apiServerInternalURL if available.
           value: "6443"
         - name: KUBERNETES_SERVICE_HOST # allows CVO to communicate with apiserver directly on same host. Is substituted with hostname from infrastructures.status.apiServerInternalURL if available.
diff --git a/pkg/cvo/cvo.go b/pkg/cvo/cvo.go
index 134f2cafd5..ee8d49c82a 100644
--- a/pkg/cvo/cvo.go
+++ b/pkg/cvo/cvo.go
@@ -45,6 +45,7 @@ import (
 	"github.com/openshift/cluster-version-operator/pkg/customsignaturestore"
 	cvointernal "github.com/openshift/cluster-version-operator/pkg/cvo/internal"
 	"github.com/openshift/cluster-version-operator/pkg/cvo/internal/dynamicclient"
+	"github.com/openshift/cluster-version-operator/pkg/featuregates"
 	"github.com/openshift/cluster-version-operator/pkg/internal"
 	"github.com/openshift/cluster-version-operator/pkg/payload"
 	"github.com/openshift/cluster-version-operator/pkg/payload/precondition"
@@ -164,9 +165,7 @@ type Operator struct {
 	// via annotation
 	exclude string
 
-	// requiredFeatureSet is set the value of featuregates.config.openshift.io|.spec.featureSet. It's a very slow
-	// moving resource, so it is not re-detected live.
-	requiredFeatureSet string
+	enabledFeatureGates featuregates.CvoGateChecker
 
 	clusterProfile string
 	uid            types.UID
@@ -187,7 +186,6 @@ func New(
 	client clientset.Interface,
 	kubeClient kubernetes.Interface,
 	exclude string,
-	requiredFeatureSet string,
 	clusterProfile string,
 	promqlTarget clusterconditions.PromQLTarget,
 	injectClusterIdIntoPromQL bool,
@@ -219,10 +217,14 @@ func New(
 		upgradeableQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "upgradeable"),
 
 		exclude:                   exclude,
-		requiredFeatureSet:        requiredFeatureSet,
 		clusterProfile:            clusterProfile,
 		conditionRegistry:         standard.NewConditionRegistry(promqlTarget),
 		injectClusterIdIntoPromQL: injectClusterIdIntoPromQL,
+
+		// Because of OCPBUGS-30080, we can only detect the enabled feature gates after the Operator loads the initial payload
+		// from disk via LoadInitialPayload. We must not run any gate-checking code until that happens, so we initialize
+		// this field with a checker that panics when used.
+		enabledFeatureGates: featuregates.PanicOnUsageBeforeInitialization,
 	}
 
 	if _, err := cvInformer.Informer().AddEventHandler(optr.clusterVersionEventHandler()); err != nil {
@@ -254,10 +256,9 @@ func New(
 	return optr, nil
 }
 
-// InitializeFromPayload waits until a ClusterVersion object exists. It then retrieves the payload contents and verifies the
-// initial state, then configures the controller that loads and applies content to the cluster. It returns an error if the
-// payload appears to be in error rather than continuing.
-func (optr *Operator) InitializeFromPayload(ctx context.Context, restConfig *rest.Config, burstRestConfig *rest.Config) error {
+// LoadInitialPayload waits until a ClusterVersion object exists. It then retrieves the payload contents, verifies the
+// initial state and returns it. If the payload is invalid, an error is returned.
+func (optr *Operator) LoadInitialPayload(ctx context.Context, startingRequiredFeatureSet configv1.FeatureSet, restConfig *rest.Config) (*payload.Update, error) {
 
 	// wait until cluster version object exists
 	if err := wait.PollUntilContextCancel(ctx, 3*time.Second, true, func(ctx context.Context) (bool, error) {
@@ -274,24 +275,19 @@
 		}
 		return true, nil
 	}); err != nil {
-		return fmt.Errorf("Error when attempting to get cluster version object: %w", err)
+		return nil, fmt.Errorf("Error when attempting to get cluster version object: %w", err)
 	}
 
-	update, err := payload.LoadUpdate(optr.defaultPayloadDir(), optr.release.Image, optr.exclude, optr.requiredFeatureSet,
+	update, err := payload.LoadUpdate(optr.defaultPayloadDir(), optr.release.Image, optr.exclude, string(startingRequiredFeatureSet),
 		optr.clusterProfile, capability.GetKnownCapabilities())
 	if err != nil {
-		return fmt.Errorf("the local release contents are invalid - no current version can be determined from disk: %v", err)
+		return nil, fmt.Errorf("the local release contents are invalid - no current version can be determined from disk: %v", err)
 	}
-
-	optr.release = update.Release
-	optr.releaseCreated = update.ImageRef.CreationTimestamp.Time
-	optr.SetArchitecture(update.Architecture)
-
 	httpClientConstructor := sigstore.NewCachedHTTPClientConstructor(optr.HTTPClient, nil)
 	configClient, err := coreclientsetv1.NewForConfig(restConfig)
 	if err != nil {
-		return fmt.Errorf("unable to create a configuration client: %v", err)
+		return nil, fmt.Errorf("unable to create a configuration client: %v", err)
 	}
 
 	customSignatureStore := &customsignaturestore.Store{
@@ -303,7 +299,7 @@
 	// attempt to load a verifier as defined in the payload
 	verifier, signatureStore, err := loadConfigMapVerifierDataFromUpdate(update, httpClientConstructor.HTTPClient, configClient, customSignatureStore)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	if verifier != nil {
 		klog.Infof("Verifying release authenticity: %v", verifier)
@@ -313,6 +309,16 @@
 	}
 	optr.verifier = verifier
 	optr.signatureStore = signatureStore
+	return update, nil
+}
+
+// InitializeFromPayload configures the controller that loads and applies content to the cluster given an initial payload
+// and feature gate data.
+func (optr *Operator) InitializeFromPayload(update *payload.Update, requiredFeatureSet configv1.FeatureSet, cvoFlags featuregates.CvoGateChecker, restConfig *rest.Config, burstRestConfig *rest.Config) {
+	optr.enabledFeatureGates = cvoFlags
+	optr.release = update.Release
+	optr.releaseCreated = update.ImageRef.CreationTimestamp.Time
+	optr.SetArchitecture(update.Architecture)
 
 	// after the verifier has been loaded, initialize the sync worker with a payload retriever
 	// which will consume the verifier
@@ -328,12 +334,10 @@
 			Cap:      time.Second * 15,
 		},
 		optr.exclude,
-		optr.requiredFeatureSet,
+		requiredFeatureSet,
 		optr.eventRecorder,
 		optr.clusterProfile,
 	)
-
-	return nil
 }
 
 // ownerReferenceModifier sets the owner reference to the current CV resource if no other reference exists. It also resets
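The split above makes initialization a two-phase affair: LoadInitialPayload must run first (while enabledFeatureGates is still the panicking placeholder), and only then can the gates for the freshly-determined version be resolved and handed to InitializeFromPayload. A minimal sketch of that call order, assuming only the signatures introduced in this diff (initializeCVO and its parameters are illustrative; the authoritative wiring is (*Context).InitializeFromPayload in pkg/start further down):

package start

import (
	"context"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/client-go/rest"

	"github.com/openshift/cluster-version-operator/pkg/cvo"
	"github.com/openshift/cluster-version-operator/pkg/featuregates"
)

// initializeCVO is a hypothetical helper showing the required call order.
func initializeCVO(ctx context.Context, optr *cvo.Operator, fg *configv1.FeatureGate,
	startingFeatureSet configv1.FeatureSet, restConfig, burstRestConfig *rest.Config) error {
	// Phase 1: wait for ClusterVersion, then load and verify the payload from disk.
	// No gate checks may run before this returns; enabledFeatureGates still panics.
	update, err := optr.LoadInitialPayload(ctx, startingFeatureSet, restConfig)
	if err != nil {
		return err
	}
	// Phase 2: the payload tells us which CVO version is running, so the gates
	// can now be resolved; fall back to defaults when no FeatureGate exists.
	gates := featuregates.DefaultCvoGates(update.Release.Version)
	if fg != nil {
		gates = featuregates.CvoGatesFromFeatureGate(fg, update.Release.Version)
	}
	optr.InitializeFromPayload(update, startingFeatureSet, gates, restConfig, burstRestConfig)
	return nil
}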
diff --git a/pkg/cvo/cvo_scenarios_test.go b/pkg/cvo/cvo_scenarios_test.go
index 5b7612d3ee..82c1cdd5a7 100644
--- a/pkg/cvo/cvo_scenarios_test.go
+++ b/pkg/cvo/cvo_scenarios_test.go
@@ -26,10 +26,11 @@ import (
 	configv1 "github.com/openshift/api/config/v1"
 	"github.com/openshift/client-go/config/clientset/versioned/fake"
+	"github.com/openshift/library-go/pkg/manifest"
+
+	"github.com/openshift/cluster-version-operator/pkg/featuregates"
 	"github.com/openshift/cluster-version-operator/pkg/payload"
 	"github.com/openshift/cluster-version-operator/pkg/payload/precondition"
-	"github.com/openshift/library-go/pkg/manifest"
 )
 
 var architecture string
@@ -108,14 +109,15 @@ func setupCVOTest(payloadDir string) (*Operator, map[string]apiruntime.Object, *
 	}
 
 	o := &Operator{
-		namespace:      "test",
-		name:           "version",
-		queue:          workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cvo-loop-test"),
-		client:         client,
-		cvLister:       &clientCVLister{client: client},
-		exclude:        "exclude-test",
-		eventRecorder:  record.NewFakeRecorder(100),
-		clusterProfile: payload.DefaultClusterProfile,
+		namespace:           "test",
+		name:                "version",
+		queue:               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cvo-loop-test"),
+		client:              client,
+		enabledFeatureGates: featuregates.DefaultCvoGates("version"),
+		cvLister:            &clientCVLister{client: client},
+		exclude:             "exclude-test",
+		eventRecorder:       record.NewFakeRecorder(100),
+		clusterProfile:      payload.DefaultClusterProfile,
 	}
 
 	dynamicScheme := apiruntime.NewScheme()
diff --git a/pkg/cvo/cvo_test.go b/pkg/cvo/cvo_test.go
index cb3d693a90..73c2275bb6 100644
--- a/pkg/cvo/cvo_test.go
+++ b/pkg/cvo/cvo_test.go
@@ -39,11 +39,12 @@ import (
 	configv1 "github.com/openshift/api/config/v1"
 	clientset "github.com/openshift/client-go/config/clientset/versioned"
 	"github.com/openshift/client-go/config/clientset/versioned/fake"
-
-	"github.com/openshift/cluster-version-operator/pkg/payload"
 	"github.com/openshift/library-go/pkg/manifest"
 	"github.com/openshift/library-go/pkg/verify/store/serial"
 	"github.com/openshift/library-go/pkg/verify/store/sigstore"
+
+	"github.com/openshift/cluster-version-operator/pkg/featuregates"
+	"github.com/openshift/cluster-version-operator/pkg/payload"
 )
 
 var (
@@ -2273,6 +2274,7 @@ func TestOperator_sync(t *testing.T) {
 				optr.configSync = &fakeSyncRecorder{Returns: expectStatus}
 			}
 			optr.eventRecorder = record.NewFakeRecorder(100)
+			optr.enabledFeatureGates = featuregates.DefaultCvoGates("version")
 
 			ctx := context.Background()
 			err := optr.sync(ctx, optr.queueKey())
diff --git a/pkg/cvo/reconciliation_issues.go b/pkg/cvo/reconciliation_issues.go
new file mode 100644
index 0000000000..144cc619ef
--- /dev/null
+++ b/pkg/cvo/reconciliation_issues.go
@@ -0,0 +1,13 @@
+package cvo
+
+import v1 "github.com/openshift/api/config/v1"
+
+const (
+	reconciliationIssuesConditionType v1.ClusterStatusConditionType = "ReconciliationIssues"
+
+	noReconciliationIssuesReason  string = "NoIssues"
+	noReconciliationIssuesMessage string = "No issues found during reconciliation"
+
+	reconciliationIssuesFoundReason  string = "IssuesFound"
+	reconciliationIssuesFoundMessage string = "Issues found during reconciliation"
+)
diff --git a/pkg/cvo/status.go b/pkg/cvo/status.go
index 25d7142b6a..36ec476c22 100644
--- a/pkg/cvo/status.go
+++ b/pkg/cvo/status.go
@@ -23,6 +23,7 @@ import (
 	configclientv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
 
 	"github.com/openshift/cluster-version-operator/lib/resourcemerge"
+	"github.com/openshift/cluster-version-operator/pkg/featuregates"
 	"github.com/openshift/cluster-version-operator/pkg/payload"
 )
 
@@ -198,7 +199,7 @@ func (optr *Operator) syncStatus(ctx context.Context, original, config *configv1
 		original = config.DeepCopy()
 	}
 
-	updateClusterVersionStatus(&config.Status, status, optr.release, optr.getAvailableUpdates, validationErrs)
+	updateClusterVersionStatus(&config.Status, status, optr.release, optr.getAvailableUpdates, optr.enabledFeatureGates, validationErrs)
 
 	if klog.V(6).Enabled() {
 		klog.Infof("Apply config: %s", diff.ObjectReflectDiff(original, config))
@@ -210,7 +211,8 @@
 
 // updateClusterVersionStatus updates the passed cvStatus with the latest status information
 func updateClusterVersionStatus(cvStatus *configv1.ClusterVersionStatus, status *SyncWorkerStatus,
-	release configv1.Release, getAvailableUpdates func() *availableUpdates, validationErrs field.ErrorList) {
+	release configv1.Release, getAvailableUpdates func() *availableUpdates, enabledGates featuregates.CvoGateChecker,
+	validationErrs field.ErrorList) {
 
 	cvStatus.ObservedGeneration = status.Generation
 	if len(status.VersionHash) > 0 {
@@ -379,6 +381,24 @@ func updateClusterVersionStatus(cvStatus *configv1.ClusterVersionStatus, status
 		}
 	}
 
+	oldRiCondition := resourcemerge.FindOperatorStatusCondition(cvStatus.Conditions, reconciliationIssuesConditionType)
+	if enabledGates.ReconciliationIssuesCondition() || (oldRiCondition != nil && enabledGates.UnknownVersion()) {
+		riCondition := configv1.ClusterOperatorStatusCondition{
+			Type:    reconciliationIssuesConditionType,
+			Status:  configv1.ConditionFalse,
+			Reason:  noReconciliationIssuesReason,
+			Message: noReconciliationIssuesMessage,
+		}
+		if status.Failure != nil {
+			riCondition.Status = configv1.ConditionTrue
+			riCondition.Reason = reconciliationIssuesFoundReason
+			riCondition.Message = fmt.Sprintf("%s: %s", reconciliationIssuesFoundMessage, status.Failure.Error())
+		}
+		resourcemerge.SetOperatorStatusCondition(&cvStatus.Conditions, riCondition)
+	} else if oldRiCondition != nil {
+		resourcemerge.RemoveOperatorStatusCondition(&cvStatus.Conditions, reconciliationIssuesConditionType)
+	}
+
 	// default retrieved updates if it is not set
 	if resourcemerge.FindOperatorStatusCondition(cvStatus.Conditions, configv1.RetrievedUpdates) == nil {
 		resourcemerge.SetOperatorStatusCondition(&cvStatus.Conditions, configv1.ClusterOperatorStatusCondition{
diff --git a/pkg/cvo/status_test.go b/pkg/cvo/status_test.go
index f9985f34ad..c98d8985db 100644
--- a/pkg/cvo/status_test.go
+++ b/pkg/cvo/status_test.go
@@ -6,12 +6,17 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/diff"
+	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/client-go/tools/record"
 
 	configv1 "github.com/openshift/api/config/v1"
 	"github.com/openshift/client-go/config/clientset/versioned/fake"
+
+	"github.com/openshift/cluster-version-operator/lib/resourcemerge"
 )
 
 func Test_mergeEqualVersions(t *testing.T) {
@@ -190,3 +195,153 @@ func TestOperator_syncFailingStatus(t *testing.T) {
 		})
 	}
 }
+
+type fakeRiFlags struct {
+	unknownVersion                bool
+	reconciliationIssuesCondition bool
+}
+
+func (f fakeRiFlags) UnknownVersion() bool {
+	return f.unknownVersion
+}
+
+func (f fakeRiFlags) ReconciliationIssuesCondition() bool {
+	return f.reconciliationIssuesCondition
+}
+
+func TestUpdateClusterVersionStatus_UnknownVersionAndReconciliationIssues(t *testing.T) {
+	ignoreLastTransitionTime := cmpopts.IgnoreFields(configv1.ClusterOperatorStatusCondition{}, "LastTransitionTime")
+
+	testCases := []struct {
+		name string
+
+		unknownVersion bool
+		oldCondition   *configv1.ClusterOperatorStatusCondition
+		failure        error
+
+		expectedRiCondition *configv1.ClusterOperatorStatusCondition
+	}{
+		{
+			name:                "ReconciliationIssues disabled, version known, no failure => condition not present",
+			unknownVersion:      false,
+			expectedRiCondition: nil,
+		},
+		{
+			name:                "ReconciliationIssues disabled, version known, failure => condition not present",
+			unknownVersion:      false,
+			failure:             fmt.Errorf("Something happened"),
+			expectedRiCondition: nil,
+		},
+		{
+			name: "ReconciliationIssues disabled, version unknown, failure, existing condition => condition present",
+			oldCondition: &configv1.ClusterOperatorStatusCondition{
+				Type:    reconciliationIssuesConditionType,
+				Status:  configv1.ConditionFalse,
+				Reason:  noReconciliationIssuesReason,
+				Message: "Happy condition is happy",
+			},
+			unknownVersion: true,
+			failure:        fmt.Errorf("Something happened"),
+			expectedRiCondition: &configv1.ClusterOperatorStatusCondition{
+				Type:    reconciliationIssuesConditionType,
+				Status:  configv1.ConditionTrue,
+				Reason:  reconciliationIssuesFoundReason,
+				Message: "Issues found during reconciliation: Something happened",
+			},
+		},
+		{
+			name:                "ReconciliationIssues disabled, version unknown, failure, no existing condition => condition not present",
+			unknownVersion:      true,
+			failure:             fmt.Errorf("Something happened"),
+			expectedRiCondition: nil,
+		},
+	}
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			gates := fakeRiFlags{
+				unknownVersion:                tc.unknownVersion,
+				reconciliationIssuesCondition: false,
+			}
+			release := configv1.Release{}
+			getAvailableUpdates := func() *availableUpdates { return nil }
+			var noErrors field.ErrorList
+			cvStatus := configv1.ClusterVersionStatus{}
+			if tc.oldCondition != nil {
+				cvStatus.Conditions = append(cvStatus.Conditions, *tc.oldCondition)
+			}
+			updateClusterVersionStatus(&cvStatus, &SyncWorkerStatus{Failure: tc.failure}, release, getAvailableUpdates, gates, noErrors)
+			condition := resourcemerge.FindOperatorStatusCondition(cvStatus.Conditions, reconciliationIssuesConditionType)
+			if diff := cmp.Diff(tc.expectedRiCondition, condition, ignoreLastTransitionTime); diff != "" {
+				t.Errorf("unexpected condition:\n%s", diff)
+			}
+		})
+
+	}
+
+}
+
+func TestUpdateClusterVersionStatus_ReconciliationIssues(t *testing.T) {
+	ignoreLastTransitionTime := cmpopts.IgnoreFields(configv1.ClusterOperatorStatusCondition{}, "LastTransitionTime")
+
+	testCases := []struct {
+		name             string
+		syncWorkerStatus SyncWorkerStatus
+
+		enabled bool
+
+		expectedCondition *configv1.ClusterOperatorStatusCondition
+	}{
+		{
+			name:             "ReconciliationIssues present and happy when gate is enabled and no failures happened",
+			syncWorkerStatus: SyncWorkerStatus{},
+			enabled:          true,
+			expectedCondition: &configv1.ClusterOperatorStatusCondition{
+				Type:    reconciliationIssuesConditionType,
+				Status:  configv1.ConditionFalse,
+				Reason:  noReconciliationIssuesReason,
+				Message: noReconciliationIssuesMessage,
+			},
+		},
+		{
+			name: "ReconciliationIssues present and unhappy when gate is enabled and failures happened",
+			syncWorkerStatus: SyncWorkerStatus{
+				Failure: fmt.Errorf("Something happened"),
+			},
+			enabled: true,
+			expectedCondition: &configv1.ClusterOperatorStatusCondition{
+				Type:    reconciliationIssuesConditionType,
+				Status:  configv1.ConditionTrue,
+				Reason:  reconciliationIssuesFoundReason,
+				Message: "Issues found during reconciliation: Something happened",
+			},
+		},
+		{
+			name: "ReconciliationIssues not present when gate is disabled and failures happened",
+			syncWorkerStatus: SyncWorkerStatus{
+				Failure: fmt.Errorf("Something happened"),
+			},
+			enabled:           false,
+			expectedCondition: nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			gates := fakeRiFlags{
+				unknownVersion:                false,
+				reconciliationIssuesCondition: tc.enabled,
+			}
+			release := configv1.Release{}
+			getAvailableUpdates := func() *availableUpdates { return nil }
+			var noErrors field.ErrorList
+			cvStatus := configv1.ClusterVersionStatus{}
+			updateClusterVersionStatus(&cvStatus, &tc.syncWorkerStatus, release, getAvailableUpdates, gates, noErrors)
+			condition := resourcemerge.FindOperatorStatusCondition(cvStatus.Conditions, reconciliationIssuesConditionType)
+			if diff := cmp.Diff(tc.expectedCondition, condition, ignoreLastTransitionTime); diff != "" {
+				t.Errorf("unexpected condition:\n%s", diff)
+			}
+		})
+	}
+}
diff --git a/pkg/cvo/sync_worker.go b/pkg/cvo/sync_worker.go
index 2bf2d26a42..cfaeb3a318 100644
--- a/pkg/cvo/sync_worker.go
+++ b/pkg/cvo/sync_worker.go
@@ -178,14 +178,14 @@ type SyncWorker struct {
 
 	// requiredFeatureSet is set to the value of Feature.config.openshift.io|spec.featureSet, which contributes to
 	// whether or not some manifests are included for reconciliation.
-	requiredFeatureSet string
+	requiredFeatureSet configv1.FeatureSet
 
 	clusterProfile string
 }
 
 // NewSyncWorker initializes a ConfigSyncWorker that will retrieve payloads to disk, apply them via builder
 // to a server, and obey limits about how often to reconcile or retry on errors.
-func NewSyncWorker(retriever PayloadRetriever, builder payload.ResourceBuilder, reconcileInterval time.Duration, backoff wait.Backoff, exclude string, requiredFeatureSet string, eventRecorder record.EventRecorder, clusterProfile string) *SyncWorker {
+func NewSyncWorker(retriever PayloadRetriever, builder payload.ResourceBuilder, reconcileInterval time.Duration, backoff wait.Backoff, exclude string, requiredFeatureSet configv1.FeatureSet, eventRecorder record.EventRecorder, clusterProfile string) *SyncWorker {
 	return &SyncWorker{
 		retriever: retriever,
 		builder:   builder,
@@ -210,7 +210,7 @@ func NewSyncWorker(retriever PayloadRetriever, builder payload.ResourceBuilder,
 // NewSyncWorkerWithPreconditions initializes a ConfigSyncWorker that will retrieve payloads to disk, apply them via builder
 // to a server, and obey limits about how often to reconcile or retry on errors.
 // It allows providing preconditions for loading payload.
-func NewSyncWorkerWithPreconditions(retriever PayloadRetriever, builder payload.ResourceBuilder, preconditions precondition.List, reconcileInterval time.Duration, backoff wait.Backoff, exclude string, requiredFeatureSet string, eventRecorder record.EventRecorder, clusterProfile string) *SyncWorker {
+func NewSyncWorkerWithPreconditions(retriever PayloadRetriever, builder payload.ResourceBuilder, preconditions precondition.List, reconcileInterval time.Duration, backoff wait.Backoff, exclude string, requiredFeatureSet configv1.FeatureSet, eventRecorder record.EventRecorder, clusterProfile string) *SyncWorker {
 	worker := NewSyncWorker(retriever, builder, reconcileInterval, backoff, exclude, requiredFeatureSet, eventRecorder, clusterProfile)
 	worker.preconditions = preconditions
 	return worker
@@ -315,7 +315,7 @@ func (w *SyncWorker) syncPayload(ctx context.Context, work *SyncWork) ([]configv
 
 	// Capability filtering is not done here since unknown capabilities are allowed
 	// during updated payload load and enablement checking only occurs during apply.
-	payloadUpdate, err := payload.LoadUpdate(info.Directory, desired.Image, w.exclude, w.requiredFeatureSet, w.clusterProfile, nil)
+	payloadUpdate, err := payload.LoadUpdate(info.Directory, desired.Image, w.exclude, string(w.requiredFeatureSet), w.clusterProfile, nil)
 	if err != nil {
 		msg := fmt.Sprintf("Loading payload failed version=%q image=%q failure=%v", desired.Version, desired.Image, err)
diff --git a/pkg/featurechangestopper/featurechangestopper.go b/pkg/featuregates/featurechangestopper.go
similarity index 67%
rename from pkg/featurechangestopper/featurechangestopper.go
rename to pkg/featuregates/featurechangestopper.go
index 93e0afed99..7231bc82b2 100644
--- a/pkg/featurechangestopper/featurechangestopper.go
+++ b/pkg/featuregates/featurechangestopper.go
@@ -1,4 +1,4 @@
-package featurechangestopper
+package featuregates
 
 import (
 	"context"
@@ -17,9 +17,10 @@ import (
 	"k8s.io/klog/v2"
 )
 
-// FeatureChangeStopper calls stop when the value of the featureset changes
-type FeatureChangeStopper struct {
-	startingRequiredFeatureSet string
+// ChangeStopper calls stop when the value of the featureset changes
+type ChangeStopper struct {
+	startingRequiredFeatureSet *configv1.FeatureSet
+	startingCvoGates           *CvoGates
 
 	featureGateLister configlistersv1.FeatureGateLister
 	cacheSynced       []cache.InformerSynced
@@ -28,18 +29,13 @@ type FeatureChangeStopper struct {
 	shutdownFn context.CancelFunc
 }
 
-// New returns a new FeatureChangeStopper.
-func New(
-	startingRequiredFeatureSet string,
-	featureGateInformer configinformersv1.FeatureGateInformer,
-) (*FeatureChangeStopper, error) {
-	c := &FeatureChangeStopper{
-		startingRequiredFeatureSet: startingRequiredFeatureSet,
-		featureGateLister:          featureGateInformer.Lister(),
-		cacheSynced:                []cache.InformerSynced{featureGateInformer.Informer().HasSynced},
-		queue:                      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "feature-gate-stopper"),
+// NewChangeStopper returns a new ChangeStopper.
+func NewChangeStopper(featureGateInformer configinformersv1.FeatureGateInformer) (*ChangeStopper, error) {
+	c := &ChangeStopper{
+		featureGateLister: featureGateInformer.Lister(),
+		cacheSynced:       []cache.InformerSynced{featureGateInformer.Informer().HasSynced},
+		queue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "feature-gate-stopper"),
 	}
-	c.queue.Add("cluster") // seed an initial sync, in case startingRequiredFeatureSet is wrong
 
 	if _, err := featureGateInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(_ interface{}) {
@@ -58,25 +54,41 @@ func New(
 	return c, nil
 }
 
+func (c *ChangeStopper) SetStartingFeatures(requiredFeatureSet configv1.FeatureSet, cvoGates CvoGates) {
+	c.startingRequiredFeatureSet = &requiredFeatureSet
+	c.startingCvoGates = &cvoGates
+}
+
 // syncHandler processes a single work entry, with the
 // processNextWorkItem caller handling the queue management. It returns
 // done when there will be no more work (because the feature gate changed).
-func (c *FeatureChangeStopper) syncHandler(ctx context.Context) (done bool, err error) {
+func (c *ChangeStopper) syncHandler(_ context.Context) (done bool, err error) {
 	var current configv1.FeatureSet
+	var currentCvoGates CvoGates
 	if featureGates, err := c.featureGateLister.Get("cluster"); err == nil {
 		current = featureGates.Spec.FeatureSet
+		currentCvoGates = CvoGatesFromFeatureGate(featureGates, c.startingCvoGates.desiredVersion)
 	} else if !apierrors.IsNotFound(err) {
 		return false, err
 	}
 
-	if string(current) != c.startingRequiredFeatureSet {
+	featureSetChanged := current != *c.startingRequiredFeatureSet
+	cvoFeaturesChanged := currentCvoGates != *c.startingCvoGates
+	if featureSetChanged || cvoFeaturesChanged {
 		var action string
 		if c.shutdownFn == nil {
 			action = "no shutdown function configured"
 		} else {
 			action = "requesting shutdown"
 		}
-		klog.Infof("FeatureSet was %q, but the current feature set is %q; %s.", c.startingRequiredFeatureSet, current, action)
+		if featureSetChanged {
+			klog.Infof("FeatureSet was %q, but the current feature set is %q; %s.", *c.startingRequiredFeatureSet, current, action)
+		}
+		if cvoFeaturesChanged {
+			klog.Infof("CVO feature flags were %+v, but changed to %+v; %s.", c.startingCvoGates, currentCvoGates, action)
+		}
+
 		if c.shutdownFn != nil {
 			c.shutdownFn()
 		}
@@ -86,7 +98,11 @@ func (c *FeatureChangeStopper) syncHandler(ctx context.Context) (done bool, err
 }
 
 // Run launches the controller and blocks until it is canceled or work completes.
-func (c *FeatureChangeStopper) Run(ctx context.Context, shutdownFn context.CancelFunc) error {
+func (c *ChangeStopper) Run(ctx context.Context, shutdownFn context.CancelFunc) error {
+	if c.startingRequiredFeatureSet == nil || c.startingCvoGates == nil {
+		return errors.New("BUG: startingRequiredFeatureSet and startingCvoGates must be set before calling Run")
+
+	}
 	// don't let panics crash the process
 	defer utilruntime.HandleCrash()
 	// make sure the work queue is shutdown which will trigger workers to end
@@ -99,13 +115,13 @@ func (c *FeatureChangeStopper) Run(ctx context.Context, shutdownFn context.Cance
 	}()
 
 	c.shutdownFn = shutdownFn
-	klog.Infof("Starting stop-on-featureset-change controller with %q.", c.startingRequiredFeatureSet)
-
 	// wait for your secondary caches to fill before starting your work
 	if !cache.WaitForCacheSync(ctx.Done(), c.cacheSynced...) {
 		return errors.New("feature gate cache failed to sync")
 	}
 
+	klog.Infof("Starting stop-on-features-change controller with startingRequiredFeatureSet=%q startingCvoGates=%+v", *c.startingRequiredFeatureSet, *c.startingCvoGates)
+
 	err := wait.PollUntilContextCancel(ctx, 30*time.Second, true, c.runWorker)
 	klog.Info("Shutting down stop-on-featureset-change controller")
 	return err
@@ -114,7 +130,7 @@ func (c *FeatureChangeStopper) Run(ctx context.Context, shutdownFn context.Cance
 // runWorker handles a single worker poll round, processing as many
 // work items as possible, and returning done when there will be no
 // more work.
-func (c *FeatureChangeStopper) runWorker(ctx context.Context) (done bool, err error) {
+func (c *ChangeStopper) runWorker(ctx context.Context) (done bool, err error) {
 	// hot loop until we're told to stop. processNextWorkItem will
 	// automatically wait until there's work available, so we don't worry
 	// about secondary waits
@@ -127,7 +143,7 @@ func (c *FeatureChangeStopper) runWorker(ctx context.Context) (done bool, err er
 
 // processNextWorkItem deals with one key off the queue. It returns
 // done when there will be no more work.
-func (c *FeatureChangeStopper) processNextWorkItem(ctx context.Context) (done bool, err error) {
+func (c *ChangeStopper) processNextWorkItem(ctx context.Context) (done bool, err error) {
 	// pull the next work item from queue. It should be a key we use to lookup
 	// something in a cache
 	key, quit := c.queue.Get()
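Because NewChangeStopper no longer receives the starting features, the type now carries an implicit two-step contract: SetStartingFeatures must run before Run, which otherwise fails with the BUG error above. A sketch of the expected wiring, assuming the names from this diff (startChangeStopper and its parameters are illustrative and imports are elided; the real call sites are in pkg/start below):

// startChangeStopper is a hypothetical helper; informerFactory, the starting
// features and shutdownFn would come from the caller.
func startChangeStopper(ctx context.Context, informerFactory externalversions.SharedInformerFactory,
	startingFeatureSet configv1.FeatureSet, cvoGates featuregates.CvoGates, shutdownFn context.CancelFunc) error {
	stopper, err := featuregates.NewChangeStopper(informerFactory.Config().V1().FeatureGates())
	if err != nil {
		return err
	}
	// Must happen before Run: Run refuses to start when the starting features are unset.
	stopper.SetStartingFeatures(startingFeatureSet, cvoGates)
	go func() {
		// Calls shutdownFn as soon as the featureset or the CVO-relevant gates change.
		if err := stopper.Run(ctx, shutdownFn); err != nil {
			klog.Errorf("stop-on-features-change controller failed: %v", err)
		}
	}()
	return nil
}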
diff --git a/pkg/featurechangestopper/featurechangestopper_test.go b/pkg/featuregates/featurechangestopper_test.go
similarity index 52%
rename from pkg/featurechangestopper/featurechangestopper_test.go
rename to pkg/featuregates/featurechangestopper_test.go
index 9b92cb8ff5..7e290cf219 100644
--- a/pkg/featurechangestopper/featurechangestopper_test.go
+++ b/pkg/featuregates/featurechangestopper_test.go
@@ -1,4 +1,4 @@
-package featurechangestopper
+package featuregates
 
 import (
 	"context"
@@ -12,42 +12,65 @@ import (
 )
 
 func TestTechPreviewChangeStopper(t *testing.T) {
+	versionForGates := "1.2.3"
 	tests := []struct {
 		name                       string
-		startingRequiredFeatureSet string
-		featureGate                string
-		expectedShutdownCalled     bool
+		startingRequiredFeatureSet configv1.FeatureSet
+		startingCvoFeatureGates    CvoGates
+
+		featureSet        string
+		featureGateStatus *configv1.FeatureGateStatus
+
+		expectedShutdownCalled bool
 	}{
 		{
 			name:                       "default-no-change",
 			startingRequiredFeatureSet: "",
-			featureGate:                "",
+			featureSet:                 "",
 			expectedShutdownCalled:     false,
 		},
 		{
 			name:                       "default-with-change-to-tech-preview",
			startingRequiredFeatureSet: "",
-			featureGate:                "TechPreviewNoUpgrade",
+			featureSet:                 "TechPreviewNoUpgrade",
 			expectedShutdownCalled:     true,
 		},
 		{
 			name:                       "default-with-change-to-other",
 			startingRequiredFeatureSet: "",
-			featureGate:                "AnythingElse",
+			featureSet:                 "AnythingElse",
 			expectedShutdownCalled:     true,
 		},
 		{
 			name:                       "techpreview-to-techpreview",
 			startingRequiredFeatureSet: "TechPreviewNoUpgrade",
-			featureGate:                "TechPreviewNoUpgrade",
+			featureSet:                 "TechPreviewNoUpgrade",
 			expectedShutdownCalled:     false,
 		},
 		{
 			name:                       "techpreview-to-not-tech-preview", // this isn't allowed today
 			startingRequiredFeatureSet: "TechPreviewNoUpgrade",
-			featureGate:                "",
+			featureSet:                 "",
 			expectedShutdownCalled:     true,
 		},
+		{
+			name:                       "cvo flags changed",
+			startingRequiredFeatureSet: "TechPreviewNoUpgrade",
+			startingCvoFeatureGates: CvoGates{
+				desiredVersion: versionForGates,
+				unknownVersion: true,
+			},
+			featureSet: "TechPreviewNoUpgrade",
+			featureGateStatus: &configv1.FeatureGateStatus{
+				FeatureGates: []configv1.FeatureGateDetails{
+					{
+						Version: versionForGates,
+						Enabled: []configv1.FeatureGateAttributes{{Name: configv1.FeatureGateUpgradeStatus}},
+					},
+				},
+			},
+			expectedShutdownCalled: true,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -59,23 +82,29 @@ func TestTechPreviewChangeStopper(t *testing.T) {
 				actualShutdownCalled = true
 			}
 
-			client := fakeconfigv1client.NewSimpleClientset(
-				&configv1.FeatureGate{
-					ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
-					Spec: configv1.FeatureGateSpec{
-						FeatureGateSelection: configv1.FeatureGateSelection{
-							FeatureSet: configv1.FeatureSet(tt.featureGate),
-						},
+			fg := &configv1.FeatureGate{
+				ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+				Spec: configv1.FeatureGateSpec{
+					FeatureGateSelection: configv1.FeatureGateSelection{
+						FeatureSet: configv1.FeatureSet(tt.featureSet),
+					},
 				},
-			)
+			}
+			if tt.featureGateStatus != nil {
+				fg.Status = *tt.featureGateStatus
+			} else {
+				fg.Status = configv1.FeatureGateStatus{}
+				tt.startingCvoFeatureGates = CvoGates{unknownVersion: true}
+			}
+
+			client := fakeconfigv1client.NewSimpleClientset(fg)
 
 			informerFactory := configv1informer.NewSharedInformerFactory(client, 0)
-			featureGates := informerFactory.Config().V1().FeatureGates()
 
-			c, err := New(tt.startingRequiredFeatureSet, featureGates)
+			c, err := NewChangeStopper(informerFactory.Config().V1().FeatureGates())
 			if err != nil {
 				t.Fatal(err)
 			}
+			c.SetStartingFeatures(tt.startingRequiredFeatureSet, tt.startingCvoFeatureGates)
 
 			informerFactory.Start(ctx.Done())
 			if err := c.Run(ctx, shutdownFn); err != nil {
diff --git a/pkg/featuregates/featuregates.go b/pkg/featuregates/featuregates.go
new file mode 100644
index 0000000000..2563a3be8c
--- /dev/null
+++ b/pkg/featuregates/featuregates.go
@@ -0,0 +1,104 @@
+package featuregates
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+)
+
+// CvoGateChecker allows CVO code to check which feature gates are enabled
+type CvoGateChecker interface {
+	// UnknownVersion flag is set to true if CVO did not find a matching version in the FeatureGate
+	// status resource, meaning the current set of enabled and disabled feature gates is unknown for
+	// this version. This should be a temporary state (config-operator should eventually add the
+	// enabled/disabled flags for this version), so CVO should try to behave in a way that reflects
+	// a "good default": default-on flags are enabled, default-off flags are disabled. Where reasonable,
+	// it can also attempt to tolerate the existing state: if it finds evidence that a feature was
+	// enabled, it can continue to behave as if it was enabled and vice versa. This temporary state
+	// should be eventually resolved when the FeatureGate status resource is updated, which forces CVO
+	// to restart when the flags change.
+	UnknownVersion() bool
+
+	// ReconciliationIssuesCondition controls whether CVO maintains a Condition with
+	// ReconciliationIssues type, containing a JSON that describes all "issues" that prevented
+	// or delayed CVO from reconciling individual resources in the cluster. This is a pseudo-API
+	// that the experimental work for "oc adm upgrade status" uses to report upgrade status, and
+	// should never be relied upon by any production code. We may want to eventually turn this into
+	// some kind of "real" API.
+	ReconciliationIssuesCondition() bool
+}
+
+type panicOnUsageBeforeInitializationFunc func()
+
+func panicOnUsageBeforeInitialization() {
+	panic("CVO feature flags were used before they were initialized")
+}
+
+// PanicOnUsageBeforeInitialization is a CvoGateChecker that panics if any of its methods are called. This checker should
+// be used before CVO feature gates are actually known and some code tries to check them.
+var PanicOnUsageBeforeInitialization = panicOnUsageBeforeInitializationFunc(panicOnUsageBeforeInitialization)
+
+func (p panicOnUsageBeforeInitializationFunc) ReconciliationIssuesCondition() bool {
+	p()
+	return false
+}
+
+func (p panicOnUsageBeforeInitializationFunc) UnknownVersion() bool {
+	p()
+	return false
+}
+
+// CvoGates contains flags that control CVO functionality gated by product feature gates. The
+// names do not correspond to product feature gates; the booleans here are "smaller" (a product-level
+// gate will enable multiple CVO behaviors).
+type CvoGates struct {
+	// desiredVersion stores the currently executing version of CVO, for which these feature gates
+	// are relevant
+	desiredVersion string
+
+	// individual flags mirror the CvoGateChecker interface
+	unknownVersion                bool
+	reconciliationIssuesCondition bool
+}
+
+func (c CvoGates) ReconciliationIssuesCondition() bool {
+	return c.reconciliationIssuesCondition
+}
+
+func (c CvoGates) UnknownVersion() bool {
+	return c.unknownVersion
+}
+
+// DefaultCvoGates returns the gates that apply when the actual features for the given version are unknown
+func DefaultCvoGates(version string) CvoGates {
+	return CvoGates{
+		desiredVersion:                version,
+		unknownVersion:                true,
+		reconciliationIssuesCondition: false,
+	}
+}
+
+// CvoGatesFromFeatureGate finds feature gates for a given version in a FeatureGate resource and returns
+// CvoGates that reflects them, or the default gates if given version was not found in the FeatureGate
+func CvoGatesFromFeatureGate(gate *configv1.FeatureGate, version string) CvoGates {
+	enabledGates := DefaultCvoGates(version)
+
+	for _, g := range gate.Status.FeatureGates {
+
+		if g.Version != version {
+			continue
+		}
+		// We found the matching version, so we do not need to run in the unknown version mode
+		enabledGates.unknownVersion = false
+		for _, enabled := range g.Enabled {
+			if enabled.Name == configv1.FeatureGateUpgradeStatus {
+				enabledGates.reconciliationIssuesCondition = true
+			}
+		}
+		for _, disabled := range g.Disabled {
+			if disabled.Name == configv1.FeatureGateUpgradeStatus {
+				enabledGates.reconciliationIssuesCondition = false
+			}
+		}
+	}
+
+	return enabledGates
+}
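The version-matching behavior of CvoGatesFromFeatureGate is worth pinning down: an entry for exactly the running version resolves the gates from its Enabled/Disabled lists, while any other version falls back to DefaultCvoGates with unknownVersion set. A runnable example-test sketch, assuming only the API shown above (the version strings are made up):

package featuregates_test

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"

	"github.com/openshift/cluster-version-operator/pkg/featuregates"
)

// ExampleCvoGatesFromFeatureGate shows both branches: a matching version
// resolves the gates, a non-matching version yields the unknown-version defaults.
func ExampleCvoGatesFromFeatureGate() {
	fg := &configv1.FeatureGate{
		Status: configv1.FeatureGateStatus{
			FeatureGates: []configv1.FeatureGateDetails{{
				Version: "4.16.0",
				Enabled: []configv1.FeatureGateAttributes{{Name: configv1.FeatureGateUpgradeStatus}},
			}},
		},
	}

	known := featuregates.CvoGatesFromFeatureGate(fg, "4.16.0")
	fmt.Println(known.UnknownVersion(), known.ReconciliationIssuesCondition())

	unknown := featuregates.CvoGatesFromFeatureGate(fg, "4.17.0")
	fmt.Println(unknown.UnknownVersion(), unknown.ReconciliationIssuesCondition())

	// Output:
	// false true
	// true false
}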
"github.com/openshift/cluster-version-operator/pkg/clusterconditions" "github.com/openshift/cluster-version-operator/pkg/cvo" - "github.com/openshift/cluster-version-operator/pkg/featurechangestopper" + "github.com/openshift/cluster-version-operator/pkg/featuregates" "github.com/openshift/cluster-version-operator/pkg/internal" "github.com/openshift/cluster-version-operator/pkg/payload" - "github.com/openshift/library-go/pkg/config/clusterstatus" - libgoleaderelection "github.com/openshift/library-go/pkg/config/leaderelection" ) const ( @@ -155,47 +157,13 @@ func (o *Options) Run(ctx context.Context) error { return fmt.Errorf("error creating clients: %v", err) } - // check to see if techpreview should be on or off. If we cannot read the featuregate for any reason, it is assumed - // to be off. If this value changes, the binary will shutdown and expect the pod lifecycle to restart it. - startingFeatureSet := "" - - // client-go automatically retries some network blip errors on GETs for 30s by default, and we want to - // retry the remaining ones ourselves. If we fail longer than that, the operator won't be able to do work - // anyway. Return the error and crashloop. - // - // We implement the timeout with a context because the timeout in PollImmediateWithContext does not behave - // well when ConditionFunc takes longer time to execute, like here where the GET can be retried by client-go - var lastError error - if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 25*time.Second, true, func(ctx context.Context) (bool, error) { - gate, fgErr := cb.ClientOrDie("feature-gate-getter").ConfigV1().FeatureGates().Get(ctx, "cluster", metav1.GetOptions{}) - switch { - case apierrors.IsNotFound(fgErr): - // if we have no featuregates, then the cluster is using the default featureset, which is "". - // This excludes everything that could possibly depend on a different feature set. 
- startingFeatureSet = "" - return true, nil - case fgErr != nil: - lastError = fgErr - klog.Warningf("Failed to get FeatureGate from cluster: %v", fgErr) - return false, nil - default: - startingFeatureSet = string(gate.Spec.FeatureSet) - return true, nil - } - }); err != nil { - if lastError != nil { - return lastError - } - return err - } - lock, err := createResourceLock(cb, o.Namespace, o.Name) if err != nil { return err } // initialize the controllers and attempt to load the payload information - controllerCtx, err := o.NewControllerContext(cb, startingFeatureSet) + controllerCtx, err := o.NewControllerContext(cb) if err != nil { return err } @@ -268,7 +236,7 @@ func (o *Options) run(ctx context.Context, controllerCtx *Context, lock resource resultChannel <- asyncResult{name: "metrics server", error: err} }() } - if err := controllerCtx.CVO.InitializeFromPayload(runContext, restConfig, burstRestConfig); err != nil { + if err := controllerCtx.InitializeFromPayload(runContext, restConfig, burstRestConfig); err != nil { if firstError == nil { firstError = err } @@ -461,17 +429,19 @@ func getLeaderElectionConfig(ctx context.Context, restcfg *rest.Config) configv1 type Context struct { CVO *cvo.Operator AutoUpdate *autoupdate.Controller - StopOnFeatureGateChange *featurechangestopper.FeatureChangeStopper + StopOnFeatureGateChange *featuregates.ChangeStopper CVInformerFactory externalversions.SharedInformerFactory OpenshiftConfigInformerFactory informers.SharedInformerFactory OpenshiftConfigManagedInformerFactory informers.SharedInformerFactory InformerFactory externalversions.SharedInformerFactory + + fgLister configlistersv1.FeatureGateLister } // NewControllerContext initializes the default Context for the current Options. It does // not start any background processes. 
-func (o *Options) NewControllerContext(cb *ClientBuilder, startingFeatureSet string) (*Context, error) { +func (o *Options) NewControllerContext(cb *ClientBuilder) (*Context, error) { client := cb.ClientOrDie("shared-informer") kubeClient := cb.KubeClientOrDie(internal.ConfigNamespace, useProtobuf) @@ -484,10 +454,6 @@ func (o *Options) NewControllerContext(cb *ClientBuilder, startingFeatureSet str sharedInformers := externalversions.NewSharedInformerFactory(client, resyncPeriod(o.ResyncInterval)) coInformer := sharedInformers.Config().V1().ClusterOperators() - featureChangeStopper, err := featurechangestopper.New(startingFeatureSet, sharedInformers.Config().V1().FeatureGates()) - if err != nil { - return nil, err - } cvoKubeClient := cb.KubeClientOrDie(o.Namespace, useProtobuf) o.PromQLTarget.KubeClient = cvoKubeClient @@ -505,7 +471,6 @@ func (o *Options) NewControllerContext(cb *ClientBuilder, startingFeatureSet str cb.ClientOrDie(o.Namespace), cvoKubeClient, o.Exclude, - startingFeatureSet, o.ClusterProfile, o.PromQLTarget, o.InjectClusterIdIntoPromQL, @@ -514,6 +479,11 @@ func (o *Options) NewControllerContext(cb *ClientBuilder, startingFeatureSet str return nil, err } + featureChangeStopper, err := featuregates.NewChangeStopper(sharedInformers.Config().V1().FeatureGates()) + if err != nil { + return nil, err + } + ctx := &Context{ CVInformerFactory: cvInformer, OpenshiftConfigInformerFactory: openshiftConfigInformer, @@ -521,6 +491,8 @@ func (o *Options) NewControllerContext(cb *ClientBuilder, startingFeatureSet str InformerFactory: sharedInformers, CVO: cvo, StopOnFeatureGateChange: featureChangeStopper, + + fgLister: sharedInformers.Config().V1().FeatureGates().Lister(), } if o.EnableAutoUpdate { @@ -542,3 +514,67 @@ func (o *Options) NewControllerContext(cb *ClientBuilder, startingFeatureSet str } return ctx, nil } + +// InitializeFromPayload initializes the CVO and FeatureGate ChangeStoppers controllers from the payload. It extracts the +// current CVO version from the initial payload and uses it to determine the initial the required featureset and enabled +// feature gates. Both the payload and determined feature information are used to initialize CVO and feature gate +// ChangeStopper controllers. +func (c *Context) InitializeFromPayload(ctx context.Context, restConfig *rest.Config, burstRestConfig *rest.Config) error { + var startingFeatureSet configv1.FeatureSet + var clusterFeatureGate *configv1.FeatureGate + + // client-go automatically retries some network blip errors on GETs for 30s by default, and we want to + // retry the remaining ones ourselves. If we fail longer than that, the operator won't be able to do work + // anyway. Return the error and crashloop. + // + // We implement the timeout with a context because the timeout in PollImmediateWithContext does not behave + // well when ConditionFunc takes longer time to execute, like here where the GET can be retried by client-go + var lastError error + if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 25*time.Second, true, func(ctx context.Context) (bool, error) { + gate, fgErr := c.fgLister.Get("cluster") + switch { + case apierrors.IsNotFound(fgErr): + // if we have no featuregates, then the cluster is using the default featureset, which is "". + // This excludes everything that could possibly depend on a different feature set. 
+ startingFeatureSet = "" + klog.Infof("FeatureGate not found in cluster, using default feature set %q at startup", startingFeatureSet) + return true, nil + case fgErr != nil: + lastError = fgErr + klog.Warningf("Failed to get FeatureGate from cluster: %v", fgErr) + return false, nil + default: + clusterFeatureGate = gate + startingFeatureSet = gate.Spec.FeatureSet + klog.Infof("FeatureGate found in cluster, using its feature set %q at startup", startingFeatureSet) + return true, nil + } + }); err != nil { + if lastError != nil { + return lastError + } + return err + } + + payload, err := c.CVO.LoadInitialPayload(ctx, startingFeatureSet, restConfig) + if err != nil { + return err + } + + var cvoGates featuregates.CvoGates + if clusterFeatureGate != nil { + cvoGates = featuregates.CvoGatesFromFeatureGate(clusterFeatureGate, payload.Release.Version) + } else { + cvoGates = featuregates.DefaultCvoGates(payload.Release.Version) + } + + if cvoGates.UnknownVersion() { + klog.Infof("CVO features for version %s could not be detected from FeatureGate; will use defaults plus special UnknownVersion feature gate", payload.Release.Version) + } + klog.Infof("CVO features for version %s enabled at startup: %+v", payload.Release.Version, cvoGates) + + c.StopOnFeatureGateChange.SetStartingFeatures(startingFeatureSet, cvoGates) + c.CVO.InitializeFromPayload(payload, startingFeatureSet, cvoGates, restConfig, burstRestConfig) + + return nil +} diff --git a/pkg/start/start_integration_test.go b/pkg/start/start_integration_test.go index 9b224df44b..3fd2a2464a 100644 --- a/pkg/start/start_integration_test.go +++ b/pkg/start/start_integration_test.go @@ -184,13 +184,12 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) { options.ReleaseImage = payloadImage1 options.PayloadOverride = filepath.Join(dir, "0.0.1") options.leaderElection = getLeaderElectionConfig(ctx, cfg) - startingFeatureSet := "" - controllers, err := options.NewControllerContext(cb, startingFeatureSet) + controllers, err := options.NewControllerContext(cb) if err != nil { t.Fatal(err) } - worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", startingFeatureSet, record.NewFakeRecorder(100), payload.DefaultClusterProfile) + worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", "", record.NewFakeRecorder(100), payload.DefaultClusterProfile) controllers.CVO.SetSyncWorkerForTesting(worker) lock, err := createResourceLock(cb, options.Namespace, options.Name) @@ -316,13 +315,12 @@ func TestIntegrationCVO_gracefulStepDown(t *testing.T) { options.ReleaseImage = payloadImage1 options.PayloadOverride = filepath.Join(dir, "0.0.1") options.leaderElection = getLeaderElectionConfig(ctx, cfg) - startingFeatureSet := "" - controllers, err := options.NewControllerContext(cb, startingFeatureSet) + controllers, err := options.NewControllerContext(cb) if err != nil { t.Fatal(err) } - worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", startingFeatureSet, record.NewFakeRecorder(100), payload.DefaultClusterProfile) + worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", "", record.NewFakeRecorder(100), payload.DefaultClusterProfile) controllers.CVO.SetSyncWorkerForTesting(worker) lock, err := createResourceLock(cb, options.Namespace, options.Name) @@ -510,13 +508,13 @@ 
diff --git a/pkg/start/start_integration_test.go b/pkg/start/start_integration_test.go
index 9b224df44b..3fd2a2464a 100644
--- a/pkg/start/start_integration_test.go
+++ b/pkg/start/start_integration_test.go
@@ -184,13 +184,12 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) {
 	options.ReleaseImage = payloadImage1
 	options.PayloadOverride = filepath.Join(dir, "0.0.1")
 	options.leaderElection = getLeaderElectionConfig(ctx, cfg)
-	startingFeatureSet := ""
-	controllers, err := options.NewControllerContext(cb, startingFeatureSet)
+	controllers, err := options.NewControllerContext(cb)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", startingFeatureSet, record.NewFakeRecorder(100), payload.DefaultClusterProfile)
+	worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", "", record.NewFakeRecorder(100), payload.DefaultClusterProfile)
 	controllers.CVO.SetSyncWorkerForTesting(worker)
 
 	lock, err := createResourceLock(cb, options.Namespace, options.Name)
@@ -316,13 +315,12 @@ func TestIntegrationCVO_gracefulStepDown(t *testing.T) {
 	options.ReleaseImage = payloadImage1
 	options.PayloadOverride = filepath.Join(dir, "0.0.1")
 	options.leaderElection = getLeaderElectionConfig(ctx, cfg)
-	startingFeatureSet := ""
-	controllers, err := options.NewControllerContext(cb, startingFeatureSet)
+	controllers, err := options.NewControllerContext(cb)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", startingFeatureSet, record.NewFakeRecorder(100), payload.DefaultClusterProfile)
+	worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", "", record.NewFakeRecorder(100), payload.DefaultClusterProfile)
 	controllers.CVO.SetSyncWorkerForTesting(worker)
 
 	lock, err := createResourceLock(cb, options.Namespace, options.Name)
@@ -510,13 +508,13 @@ metadata:
 	options.ReleaseImage = payloadImage1
 	options.PayloadOverride = payloadDir
 	options.leaderElection = getLeaderElectionConfig(ctx, cfg)
-	startingFeatureSet := ""
-	controllers, err := options.NewControllerContext(cb, startingFeatureSet)
+
+	controllers, err := options.NewControllerContext(cb)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", startingFeatureSet, record.NewFakeRecorder(100), payload.DefaultClusterProfile)
+	worker := cvo.NewSyncWorker(retriever, cvo.NewResourceBuilder(cfg, cfg, nil, nil), 5*time.Second, wait.Backoff{Steps: 3}, "", "", record.NewFakeRecorder(100), payload.DefaultClusterProfile)
 	controllers.CVO.SetSyncWorkerForTesting(worker)
 
 	arch := runtime.GOARCH
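A side note on why these test call sites still compile with a bare "" where requiredFeatureSet became typed: in NewSyncWorker(..., "", "", ...) the first "" is the exclude string and the second is the requiredFeatureSet; the latter is an untyped string constant, which Go converts implicitly to configv1.FeatureSet, so only typed string variables would need an explicit conversion. A minimal illustration (takesFeatureSet is hypothetical):

package main

import configv1 "github.com/openshift/api/config/v1"

// takesFeatureSet stands in for the requiredFeatureSet parameters that changed
// from string to configv1.FeatureSet in this diff.
func takesFeatureSet(fs configv1.FeatureSet) {}

func main() {
	takesFeatureSet("")                            // untyped constant: converts implicitly
	takesFeatureSet(configv1.TechPreviewNoUpgrade) // named FeatureSet constant
	var s string
	takesFeatureSet(configv1.FeatureSet(s)) // a typed string needs an explicit conversion
}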