From b014195c4389cad3122f493bba36ce124265a58b Mon Sep 17 00:00:00 2001 From: Marc Sluiter Date: Mon, 10 Jul 2023 19:16:08 +0200 Subject: [PATCH 1/4] Add feature gate for disabling the MHC controller There is a plan to let the Node Healthcheck Operator, which is an optional day 2 operator today, also implement the MHC controller. For being able to test the POC in NHC, we need to be able to disable MAO's MHC controller. So for now this feature gate is just for dev usage. The actual transition of the MHC controller from MAO to NHC will be discussed and planned in the future. Signed-off-by: Marc Sluiter --- pkg/operator/operator.go | 25 ++- pkg/operator/operator_test.go | 225 ++++++++++++++++++++------ pkg/operator/sync.go | 20 ++- pkg/util/featuregates/featuregates.go | 36 +++++ 4 files changed, 232 insertions(+), 74 deletions(-) create mode 100644 pkg/util/featuregates/featuregates.go diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index fb95deb07..2982b7bf0 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -13,8 +13,8 @@ import ( configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" machineclientset "github.com/openshift/client-go/machine/clientset/versioned" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/machine-api-operator/pkg/util/featuregates" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -353,19 +353,6 @@ func (optr *Operator) sync(key string) (reconcile.Result, error) { return optr.syncAll(operatorConfig) } -func getFeatureGate(lister configlistersv1.FeatureGateLister) (*osconfigv1.FeatureGate, error) { - featureGate, err := lister.Get("cluster") - if errors.IsNotFound(err) { - // No feature gate is set, therefore cannot be external. - // This is not an error as the feature gate is an optional resource. 
- return nil, nil - } else if err != nil { - return nil, fmt.Errorf("could not fetch featuregate: %v", err) - } - - return featureGate, nil -} - func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { infra, err := optr.osClient.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) if err != nil { @@ -382,7 +369,7 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { return nil, err } - featureGate, err := getFeatureGate(optr.featureGateLister) + featureGate, err := optr.osClient.ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{}) if err != nil { return nil, err } @@ -412,6 +399,12 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { return nil, err } + // in case the MHC controller is disabled, leave its image empty + mhcImage := machineAPIOperatorImage + if !featuregates.IsDeployMHCControllerEnabled(featureGate) { + mhcImage = "" + } + return &OperatorConfig{ TargetNamespace: optr.namespace, Proxy: clusterWideProxy, @@ -419,7 +412,7 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { Provider: providerControllerImage, MachineSet: machineAPIOperatorImage, NodeLink: machineAPIOperatorImage, - MachineHealthCheck: machineAPIOperatorImage, + MachineHealthCheck: mhcImage, KubeRBACProxy: kubeRBACProxy, TerminationHandler: terminationHandlerImage, }, diff --git a/pkg/operator/operator_test.go b/pkg/operator/operator_test.go index e3aaea5ae..0508b4f9e 100644 --- a/pkg/operator/operator_test.go +++ b/pkg/operator/operator_test.go @@ -16,6 +16,7 @@ import ( configinformersv1 "github.com/openshift/client-go/config/informers/externalversions" fakemachine "github.com/openshift/client-go/machine/clientset/versioned/fake" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/machine-api-operator/pkg/util/featuregates" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -32,10 +33,9 @@ import ( ) const ( - deploymentName = "machine-api-controllers" - targetNamespace = "test-namespace" - hcControllerName = "machine-healthcheck-controller" - releaseVersion = "0.0.0.test-unit" + deploymentName = "machine-api-controllers" + targetNamespace = "test-namespace" + releaseVersion = "0.0.0.test-unit" ) func newFakeOperator(kubeObjects, osObjects, machineObjects []runtime.Object, imagesFile string, stopCh <-chan struct{}) (*Operator, error) { @@ -179,6 +179,17 @@ func TestOperatorSync_NoOp(t *testing.T) { }, } + featureGate := &openshiftv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: openshiftv1.FeatureGateSpec{ + FeatureGateSelection: openshiftv1.FeatureGateSelection{ + FeatureSet: openshiftv1.Default, + }, + }, + } + proxy := &openshiftv1.Proxy{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", @@ -187,7 +198,7 @@ func TestOperatorSync_NoOp(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - optr, err := newFakeOperator(nil, []runtime.Object{infra, proxy}, nil, imagesJSONFile, stopCh) + optr, err := newFakeOperator(nil, []runtime.Object{infra, featureGate, proxy}, nil, imagesJSONFile, stopCh) if err != nil { t.Fatal(err) } @@ -316,6 +327,17 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, } + featureGate := &openshiftv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: openshiftv1.FeatureGateSpec{ + FeatureGateSelection: openshiftv1.FeatureGateSelection{ + 
FeatureSet: openshiftv1.Default, + }, + }, + } + proxy := &openshiftv1.Proxy{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", @@ -326,16 +348,18 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name string platform openshiftv1.PlatformType infra *openshiftv1.Infrastructure + featureGate *openshiftv1.FeatureGate proxy *openshiftv1.Proxy imagesFile string expectedConfig *OperatorConfig expectedError error }{ { - name: string(openshiftv1.AWSPlatformType), - platform: openshiftv1.AWSPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.AWSPlatformType), + platform: openshiftv1.AWSPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -351,10 +375,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.AlibabaCloudPlatformType), - platform: openshiftv1.AlibabaCloudPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.AlibabaCloudPlatformType), + platform: openshiftv1.AlibabaCloudPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -370,10 +395,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.LibvirtPlatformType), - platform: openshiftv1.LibvirtPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.LibvirtPlatformType), + platform: openshiftv1.LibvirtPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -389,10 +415,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.OpenStackPlatformType), - platform: openshiftv1.OpenStackPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.OpenStackPlatformType), + platform: openshiftv1.OpenStackPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -408,10 +435,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.AzurePlatformType), - platform: openshiftv1.AzurePlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.AzurePlatformType), + platform: openshiftv1.AzurePlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -427,10 +455,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.BareMetalPlatformType), - platform: openshiftv1.BareMetalPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.BareMetalPlatformType), + platform: openshiftv1.BareMetalPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -446,10 +475,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.GCPPlatformType), - platform: openshiftv1.GCPPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.GCPPlatformType), + platform: openshiftv1.GCPPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -465,10 +495,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: 
string(kubemarkPlatform), - platform: kubemarkPlatform, - infra: infra, - proxy: proxy, + name: string(kubemarkPlatform), + platform: kubemarkPlatform, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -484,10 +515,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.VSpherePlatformType), - platform: openshiftv1.VSpherePlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.VSpherePlatformType), + platform: openshiftv1.VSpherePlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -503,10 +535,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.OvirtPlatformType), - platform: openshiftv1.OvirtPlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.OvirtPlatformType), + platform: openshiftv1.OvirtPlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -522,10 +555,11 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.NonePlatformType), - platform: openshiftv1.NonePlatformType, - infra: infra, - proxy: proxy, + name: string(openshiftv1.NonePlatformType), + platform: openshiftv1.NonePlatformType, + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -541,10 +575,81 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: "bad-platform", - platform: "bad-platform", + // MHC controller being enabled is the default for now (which is covered by all other tests), + // but this test ensures that enabling works once the default changes + name: "mhc-controller-enabled", + platform: openshiftv1.BareMetalPlatformType, + infra: infra, + featureGate: &openshiftv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: openshiftv1.FeatureGateSpec{ + FeatureGateSelection: openshiftv1.FeatureGateSelection{ + FeatureSet: openshiftv1.CustomNoUpgrade, + CustomNoUpgrade: &openshiftv1.CustomFeatureGates{ + Enabled: []openshiftv1.FeatureGateName{ + openshiftv1.FeatureGateName(featuregates.DeployMHCControllerFeatureGateName), + }, + }, + }, + }, + }, + proxy: proxy, + expectedConfig: &OperatorConfig{ + TargetNamespace: targetNamespace, + Proxy: proxy, + Controllers: Controllers{ + Provider: images.ClusterAPIControllerBareMetal, + MachineSet: images.MachineAPIOperator, + NodeLink: images.MachineAPIOperator, + MachineHealthCheck: images.MachineAPIOperator, + TerminationHandler: clusterAPIControllerNoOp, + KubeRBACProxy: images.KubeRBACProxy, + }, + PlatformType: openshiftv1.BareMetalPlatformType, + }, + }, + { + name: "mhc-controller-disabled", + platform: openshiftv1.BareMetalPlatformType, infra: infra, - proxy: proxy, + featureGate: &openshiftv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: openshiftv1.FeatureGateSpec{ + FeatureGateSelection: openshiftv1.FeatureGateSelection{ + FeatureSet: openshiftv1.CustomNoUpgrade, + CustomNoUpgrade: &openshiftv1.CustomFeatureGates{ + Disabled: []openshiftv1.FeatureGateName{ + openshiftv1.FeatureGateName(featuregates.DeployMHCControllerFeatureGateName), + }, + }, + }, + }, + }, + proxy: proxy, + expectedConfig: &OperatorConfig{ + TargetNamespace: targetNamespace, + Proxy: 
proxy, + Controllers: Controllers{ + Provider: images.ClusterAPIControllerBareMetal, + MachineSet: images.MachineAPIOperator, + NodeLink: images.MachineAPIOperator, + MachineHealthCheck: "", + TerminationHandler: clusterAPIControllerNoOp, + KubeRBACProxy: images.KubeRBACProxy, + }, + PlatformType: openshiftv1.BareMetalPlatformType, + }, + }, + { + name: "bad-platform", + platform: "bad-platform", + infra: infra, + featureGate: featureGate, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -563,14 +668,25 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name: "no-infra", platform: "no-infra", infra: nil, + featureGate: featureGate, proxy: proxy, expectedConfig: nil, expectedError: kerrors.NewNotFound(schema.GroupResource{Group: "config.openshift.io", Resource: "infrastructures"}, "cluster"), }, + { + name: "no-featuregate", + platform: "no-featuregate", + infra: infra, + featureGate: nil, + proxy: proxy, + expectedConfig: nil, + expectedError: kerrors.NewNotFound(schema.GroupResource{Group: "config.openshift.io", Resource: "featuregates"}, "cluster"), + }, { name: "no-proxy", platform: "no-proxy", infra: infra, + featureGate: featureGate, proxy: nil, expectedConfig: nil, expectedError: kerrors.NewNotFound(schema.GroupResource{Group: "config.openshift.io", Resource: "proxies"}, "cluster"), @@ -579,6 +695,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name: "no-platform", platform: "", infra: infra, + featureGate: featureGate, proxy: proxy, expectedConfig: nil, expectedError: errors.New("no platform provider found on install config"), @@ -587,6 +704,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name: "no-images-file", platform: openshiftv1.NonePlatformType, infra: infra, + featureGate: featureGate, proxy: proxy, imagesFile: "fixtures/not-found.json", expectedConfig: nil, @@ -613,6 +731,9 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { inf.Status.PlatformStatus = &openshiftv1.PlatformStatus{Type: tc.platform} objects = append(objects, inf) } + if tc.featureGate != nil { + objects = append(objects, tc.featureGate.DeepCopy()) + } if tc.proxy != nil { proxy := tc.proxy.DeepCopy() objects = append(objects, proxy) diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 5ac69d6ae..8eb5349a2 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -499,7 +499,8 @@ func newRBACConfigVolumes() []corev1.Volume { func newPodTemplateSpec(config *OperatorConfig, features map[string]bool) *corev1.PodTemplateSpec { containers := newContainers(config, features) - proxyContainers := newKubeProxyContainers(config.Controllers.KubeRBACProxy) + withMHCProxy := config.Controllers.MachineHealthCheck != "" + proxyContainers := newKubeProxyContainers(config.Controllers.KubeRBACProxy, withMHCProxy) tolerations := []corev1.Toleration{ { Key: "node-role.kubernetes.io/master", @@ -757,7 +758,9 @@ func newContainers(config *OperatorConfig, features map[string]bool) []corev1.Co Env: proxyEnvArgs, Resources: resources, }, - { + } + if config.Controllers.MachineHealthCheck != "" { + containers = append(containers, corev1.Container{ Name: "machine-healthcheck-controller", Image: config.Controllers.MachineHealthCheck, Command: []string{"/machine-healthcheck"}, @@ -786,17 +789,22 @@ func newContainers(config *OperatorConfig, features map[string]bool) []corev1.Co }, }, }, - }, + }) } return containers } -func newKubeProxyContainers(image string) []corev1.Container { - return []corev1.Container{ +func newKubeProxyContainers(image 
string, withMHCProxy bool) []corev1.Container { + proxyContainers := []corev1.Container{ newKubeProxyContainer(image, "machineset-mtrc", metrics.DefaultMachineSetMetricsAddress, machineSetExposeMetricsPort), newKubeProxyContainer(image, "machine-mtrc", metrics.DefaultMachineMetricsAddress, machineExposeMetricsPort), - newKubeProxyContainer(image, "mhc-mtrc", metrics.DefaultHealthCheckMetricsAddress, machineHealthCheckExposeMetricsPort), } + if withMHCProxy { + proxyContainers = append(proxyContainers, + newKubeProxyContainer(image, "mhc-mtrc", metrics.DefaultHealthCheckMetricsAddress, machineHealthCheckExposeMetricsPort), + ) + } + return proxyContainers } func newKubeProxyContainer(image, portName, upstreamPort string, exposePort int32) corev1.Container { diff --git a/pkg/util/featuregates/featuregates.go b/pkg/util/featuregates/featuregates.go new file mode 100644 index 000000000..b89bc81ab --- /dev/null +++ b/pkg/util/featuregates/featuregates.go @@ -0,0 +1,36 @@ +package featuregates + +import ( + v1 "github.com/openshift/api/config/v1" + "k8s.io/klog/v2" +) + +const ( + // DeployMHCControllerFeatureGateName is the name of the feature gate for enabling the MHC controller + DeployMHCControllerFeatureGateName = "MachineAPIOperatorDeployMHCController" +) + +// IsDeployMHCControllerEnabled returns if the feature gate for the MHC controller deployment is enabled. +// For now this is an experimental feature gate, and we only check if it's disabled via the CustomNoUpgrade feature set. +// The purpose is to disable the MHC controller for being able to test the upcoming MHC feature of the NodeHealthCheck operator. +// Whenever NHC becomes the default MHC handler, the default return value needs to be changed to false! +func IsDeployMHCControllerEnabled(fg *v1.FeatureGate) bool { + deployMHCControllerFeatureGate := v1.FeatureGateName(DeployMHCControllerFeatureGateName) + if fg != nil && fg.Spec.CustomNoUpgrade != nil { + for _, enabled := range fg.Spec.CustomNoUpgrade.Enabled { + if enabled == deployMHCControllerFeatureGate { + klog.V(2).Info("MHC controller enabled by feature gate") + return true + } + } + for _, disabled := range fg.Spec.CustomNoUpgrade.Disabled { + if disabled == deployMHCControllerFeatureGate { + klog.V(2).Info("MHC controller disabled by feature gate") + return false + } + } + } + // switch to false once NHC is the default! 
+ klog.V(4).Info("MHC controller enabled (default)") + return true +} From c1d10d5588f0db22fda38d3234dc215b05c9c9df Mon Sep 17 00:00:00 2001 From: Marc Sluiter Date: Tue, 11 Jul 2023 12:14:48 +0200 Subject: [PATCH 2/4] Update openshift/api for new feature gate Signed-off-by: Marc Sluiter --- go.mod | 2 +- go.sum | 4 +- .../openshift/api/config/v1/feature_gates.go | 20 +++ .../openshift/api/config/v1/types_feature.go | 4 +- .../machine/v1beta1/types_azureprovider.go | 118 +++++++++++++++++- .../machine/v1beta1/zz_generated.deepcopy.go | 95 ++++++++++++++ .../zz_generated.swagger_doc_generated.go | 53 +++++++- .../route/v1/route-CustomNoUpgrade.crd.yaml | 4 +- .../v1/route-TechPreviewNoUpgrade.crd.yaml | 4 +- vendor/modules.txt | 2 +- 10 files changed, 293 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index fa99a8d64..46bc0859c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/google/uuid v1.3.0 github.com/onsi/ginkgo/v2 v2.9.5 github.com/onsi/gomega v1.27.7 - github.com/openshift/api v0.0.0-20230703162140-6e9853e4c905 + github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb github.com/openshift/library-go v0.0.0-20230508110756-9b7abe2c9cbf github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b diff --git a/go.sum b/go.sum index e20f14a69..9d4674efe 100644 --- a/go.sum +++ b/go.sum @@ -631,8 +631,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= -github.com/openshift/api v0.0.0-20230703162140-6e9853e4c905 h1:zvzzN6z/QxwQ6KiHnGb/DuVSOhHkYX6SqkPILdwc/3s= -github.com/openshift/api v0.0.0-20230703162140-6e9853e4c905/go.mod h1:4VWG+W22wrB4HfBL88P40DxLEpSOaiBVxUnfalfJo9k= +github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 h1:j7LIIr4Vrdy4Dpd4bw2j53UXUSjA1eXXC0x89g9kyAI= +github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs= github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb h1:Nij5OnaECrkmcRQMAE9LMbQXPo95aqFnf+12B7SyFVI= github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb/go.mod h1:Rhb3moCqeiTuGHAbXBOlwPubUMlOZEkrEWTRjIF3jzs= github.com/openshift/library-go v0.0.0-20230508110756-9b7abe2c9cbf h1:ZpFAN2qprgp7jEhGPrOAwP8mmuYC9BRYzvDefg+k4GM= diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go index 82ca5d85a..939d17065 100644 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -281,4 +281,24 @@ var ( ResponsiblePerson: "thejasn", OwningProduct: ocpSpecific, } + + FeatureGateAutomatedEtcdBackup = FeatureGateName("AutomatedEtcdBackup") + automatedEtcdBackup = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateAutomatedEtcdBackup, + }, + OwningJiraComponent: "etcd", + ResponsiblePerson: "hasbro17", + OwningProduct: ocpSpecific, + } + + FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = FeatureGateName("MachineAPIOperatorDisableMachineHealthCheckController") + machineAPIOperatorDisableMachineHealthCheckController = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ 
+ Name: FeatureGateMachineAPIOperatorDisableMachineHealthCheckController, + }, + OwningJiraComponent: "ecoproject", + ResponsiblePerson: "msluiter", + OwningProduct: ocpSpecific, + } ) diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index d445c345a..d6997ecdb 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -164,7 +164,6 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ }, TechPreviewNoUpgrade: newDefaultFeatures(). with(externalCloudProvider). - with(externalCloudProviderAzure). with(externalCloudProviderGCP). with(externalCloudProviderExternal). with(csiDriverSharedResource). @@ -186,6 +185,8 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ with(gcpLabelsTags). with(vSphereStaticIPs). with(routeExternalCertificate). + with(automatedEtcdBackup). + without(machineAPIOperatorDisableMachineHealthCheckController). toFeatures(defaultFeatures), LatencySensitive: newDefaultFeatures(). toFeatures(defaultFeatures), @@ -196,6 +197,7 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ openShiftPodSecurityAdmission, alibabaPlatform, // This is a bug, it should be TechPreviewNoUpgrade. This must be downgraded before 4.14 is shipped. cloudDualStackNodeIPs, + externalCloudProviderAzure, }, Disabled: []FeatureGateDescription{ retroactiveDefaultStorageClass, diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go index 547da1af7..1d565e5d2 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -6,6 +6,28 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// SecurityEncryptionTypes represents the Encryption Type when the Azure Virtual Machine is a +// Confidential VM. +type SecurityEncryptionTypes string + +const ( + // SecurityEncryptionTypesVMGuestStateOnly disables OS disk confidential encryption. + SecurityEncryptionTypesVMGuestStateOnly SecurityEncryptionTypes = "VMGuestStateOnly" + // SecurityEncryptionTypesDiskWithVMGuestState enables OS disk confidential encryption with a + // platform-managed key (PMK) or a customer-managed key (CMK). + SecurityEncryptionTypesDiskWithVMGuestState SecurityEncryptionTypes = "DiskWithVMGuestState" +) + +// SecurityTypes represents the SecurityType of the virtual machine. +type SecurityTypes string + +const ( + // SecurityTypesConfidentialVM defines the SecurityType of the virtual machine as a Confidential VM. + SecurityTypesConfidentialVM SecurityTypes = "ConfidentialVM" + // SecurityTypesTrustedLaunch defines the SecurityType of the virtual machine as a Trusted Launch VM. + SecurityTypesTrustedLaunch SecurityTypes = "TrustedLaunch" +) + // AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field // for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. // Required parameters such as location that are not specified by this configuration, will be defaulted @@ -397,6 +419,36 @@ type OSDiskManagedDiskParameters struct { // DiskEncryptionSet is the disk encryption set properties // +optional DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` + // securityProfile specifies the security profile for the managed disk. 
+ // +optional + SecurityProfile VMDiskSecurityProfile `json:"securityProfile,omitempty"` +} + +// VMDiskSecurityProfile specifies the security profile settings for the managed disk. +// It can be set only for Confidential VMs. +type VMDiskSecurityProfile struct { + // diskEncryptionSet specifies the customer managed disk encryption set resource id for the + // managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and + // VMGuest blob. + // +optional + DiskEncryptionSet DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` + // securityEncryptionType specifies the encryption type of the managed disk. + // It is set to DiskWithVMGuestState to encrypt the managed disk along with the VMGuestState + // blob, and to VMGuestStateOnly to encrypt the VMGuestState blob only. + // When set to VMGuestStateOnly, the vTPM should be enabled. + // When set to DiskWithVMGuestState, both SecureBoot and vTPM should be enabled. + // If the above conditions are not fulfilled, the VM will not be created and the respective error + // will be returned. + // It can be set only for Confidential VMs. Confidential VMs are defined by their + // SecurityProfile.SecurityType being set to ConfidentialVM, the SecurityEncryptionType of their + // OS disk being set to one of the allowed values and by enabling the respective + // SecurityProfile.UEFISettings of the VM (i.e. vTPM and SecureBoot), depending on the selected + // SecurityEncryptionType. + // For further details on Azure Confidential VMs, please refer to the respective documentation: + // https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview + // +kubebuilder:validation:Enum=VMGuestStateOnly;DiskWithVMGuestState + // +optional + SecurityEncryptionType SecurityEncryptionTypes `json:"securityEncryptionType,omitempty"` } // DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk. @@ -437,11 +489,71 @@ type DiskEncryptionSetParameters struct { // SecurityProfile specifies the Security profile settings for a // virtual machine or virtual machine scale set. type SecurityProfile struct { - // This field indicates whether Host Encryption should be enabled - // or disabled for a virtual machine or virtual machine scale - // set. Default is disabled. + // encryptionAtHost indicates whether Host Encryption should be enabled or disabled for a virtual + // machine or virtual machine scale set. + // This should be disabled when SecurityEncryptionType is set to DiskWithVMGuestState. + // Default is disabled. // +optional EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` + // settings specify the security type and the UEFI settings of the virtual machine. This field can + // be set for Confidential VMs and Trusted Launch for VMs. + // +optional + Settings SecuritySettings `json:"settings,omitempty"` +} + +// SecuritySettings define the security type and the UEFI settings of the virtual machine. +// +union +type SecuritySettings struct { + // securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to + // enable UEFISettings. The default behavior is: UEFISettings will not be enabled unless this property is set. + // +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch + // +kubebuilder:validation:Required + // +unionDiscriminator + SecurityType SecurityTypes `json:"securityType,omitempty"` + // confidentialVM specifies the security configuration of the virtual machine. 
+ // For more information regarding Confidential VMs, please refer to: + // https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview + // +optional + ConfidentialVM *ConfidentialVM `json:"confidentialVM,omitempty"` + // trustedLaunch specifies the security configuration of the virtual machine. + // For more information regarding TrustedLaunch for VMs, please refer to: + // https://learn.microsoft.com/azure/virtual-machines/trusted-launch + // +optional + TrustedLaunch *TrustedLaunch `json:"trustedLaunch,omitempty"` +} + +// ConfidentialVM defines the UEFI settings for the virtual machine. +type ConfidentialVM struct { + // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. + // +kubebuilder:validation:Required + UEFISettings UEFISettings `json:"uefiSettings,omitempty"` +} + +// TrustedLaunch defines the UEFI settings for the virtual machine. +type TrustedLaunch struct { + // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. + // +kubebuilder:validation:Required + UEFISettings UEFISettings `json:"uefiSettings,omitempty"` +} + +// UEFISettings specifies the security settings like secure boot and vTPM used while creating the +// virtual machine. +type UEFISettings struct { + // secureBoot specifies whether secure boot should be enabled on the virtual machine. + // Secure Boot verifies the digital signature of all boot components and halts the boot process if + // signature verification fails. + // If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled. + // +kubebuilder:validation:Enum=Enabled;Disabled + // +optional + SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"` + // virtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine. + // When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline. + // The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. + // This is required to be enabled if SecurityEncryptionType is defined. + // If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled. + // +kubebuilder:validation:Enum=Enabled;Disabled + // +optional + VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"` } // AzureUltraSSDCapabilityState defines the different states of an UltraSSDCapability diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index f4dfeb7a9..4cca127cb 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -450,6 +450,23 @@ func (in Conditions) DeepCopy() Conditions { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialVM) DeepCopyInto(out *ConfidentialVM) { + *out = *in + out.UEFISettings = in.UEFISettings + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialVM. 
+func (in *ConfidentialVM) DeepCopy() *ConfidentialVM { + if in == nil { + return nil + } + out := new(ConfidentialVM) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataDisk) DeepCopyInto(out *DataDisk) { *out = *in @@ -1490,6 +1507,7 @@ func (in *OSDiskManagedDiskParameters) DeepCopyInto(out *OSDiskManagedDiskParame *out = new(DiskEncryptionSetParameters) **out = **in } + out.SecurityProfile = in.SecurityProfile return } @@ -1585,6 +1603,7 @@ func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) { *out = new(bool) **out = **in } + in.Settings.DeepCopyInto(&out.Settings) return } @@ -1598,6 +1617,32 @@ func (in *SecurityProfile) DeepCopy() *SecurityProfile { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecuritySettings) DeepCopyInto(out *SecuritySettings) { + *out = *in + if in.ConfidentialVM != nil { + in, out := &in.ConfidentialVM, &out.ConfidentialVM + *out = new(ConfidentialVM) + **out = **in + } + if in.TrustedLaunch != nil { + in, out := &in.TrustedLaunch, &out.TrustedLaunch + *out = new(TrustedLaunch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettings. +func (in *SecuritySettings) DeepCopy() *SecuritySettings { + if in == nil { + return nil + } + out := new(SecuritySettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) { *out = *in @@ -1656,6 +1701,39 @@ func (in *TagSpecification) DeepCopy() *TagSpecification { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedLaunch) DeepCopyInto(out *TrustedLaunch) { + *out = *in + out.UEFISettings = in.UEFISettings + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedLaunch. +func (in *TrustedLaunch) DeepCopy() *TrustedLaunch { + if in == nil { + return nil + } + out := new(TrustedLaunch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UEFISettings) DeepCopyInto(out *UEFISettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UEFISettings. +func (in *UEFISettings) DeepCopy() *UEFISettings { + if in == nil { + return nil + } + out := new(UEFISettings) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) { *out = *in @@ -1673,6 +1751,23 @@ func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMDiskSecurityProfile) DeepCopyInto(out *VMDiskSecurityProfile) { + *out = *in + out.DiskEncryptionSet = in.DiskEncryptionSet + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMDiskSecurityProfile. 
+func (in *VMDiskSecurityProfile) DeepCopy() *VMDiskSecurityProfile { + if in == nil { + return nil + } + out := new(VMDiskSecurityProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { *out = *in diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index c6128a769..108ba557c 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -224,6 +224,15 @@ func (AzureMachineProviderStatus) SwaggerDoc() map[string]string { return map_AzureMachineProviderStatus } +var map_ConfidentialVM = map[string]string{ + "": "ConfidentialVM defines the UEFI settings for the virtual machine.", + "uefiSettings": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.", +} + +func (ConfidentialVM) SwaggerDoc() map[string]string { + return map_ConfidentialVM +} + var map_DataDisk = map[string]string{ "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.", "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", @@ -296,6 +305,7 @@ var map_OSDiskManagedDiskParameters = map[string]string{ "": "OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk.", "storageAccountType": "StorageAccountType is the storage account type to use. 
Possible values include \"Standard_LRS\", \"Premium_LRS\".", "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties", + "securityProfile": "securityProfile specifies the security profile for the managed disk.", } func (OSDiskManagedDiskParameters) SwaggerDoc() map[string]string { @@ -304,13 +314,25 @@ func (OSDiskManagedDiskParameters) SwaggerDoc() map[string]string { var map_SecurityProfile = map[string]string{ "": "SecurityProfile specifies the Security profile settings for a virtual machine or virtual machine scale set.", - "encryptionAtHost": "This field indicates whether Host Encryption should be enabled or disabled for a virtual machine or virtual machine scale set. Default is disabled.", + "encryptionAtHost": "encryptionAtHost indicates whether Host Encryption should be enabled or disabled for a virtual machine or virtual machine scale set. This should be disabled when SecurityEncryptionType is set to DiskWithVMGuestState. Default is disabled.", + "settings": "settings specify the security type and the UEFI settings of the virtual machine. This field can be set for Confidential VMs and Trusted Launch for VMs.", } func (SecurityProfile) SwaggerDoc() map[string]string { return map_SecurityProfile } +var map_SecuritySettings = map[string]string{ + "": "SecuritySettings define the security type and the UEFI settings of the virtual machine.", + "securityType": "securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UEFISettings. The default behavior is: UEFISettings will not be enabled unless this property is set.", + "confidentialVM": "confidentialVM specifies the security configuration of the virtual machine. For more information regarding Confidential VMs, please refer to: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview", + "trustedLaunch": "trustedLaunch specifies the security configuration of the virtual machine. For more information regarding TrustedLaunch for VMs, please refer to: https://learn.microsoft.com/azure/virtual-machines/trusted-launch", +} + +func (SecuritySettings) SwaggerDoc() map[string]string { + return map_SecuritySettings +} + var map_SpotVMOptions = map[string]string{ "": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs", "maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances", @@ -320,6 +342,35 @@ func (SpotVMOptions) SwaggerDoc() map[string]string { return map_SpotVMOptions } +var map_TrustedLaunch = map[string]string{ + "": "TrustedLaunch defines the UEFI settings for the virtual machine.", + "uefiSettings": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.", +} + +func (TrustedLaunch) SwaggerDoc() map[string]string { + return map_TrustedLaunch +} + +var map_UEFISettings = map[string]string{ + "": "UEFISettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.", + "secureBoot": "secureBoot specifies whether secure boot should be enabled on the virtual machine. Secure Boot verifies the digital signature of all boot components and halts the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.", + "virtualizedTrustedPlatformModule": "virtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine. 
When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be enabled if SecurityEncryptionType is defined. If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.", +} + +func (UEFISettings) SwaggerDoc() map[string]string { + return map_UEFISettings +} + +var map_VMDiskSecurityProfile = map[string]string{ + "": "VMDiskSecurityProfile specifies the security profile settings for the managed disk. It can be set only for Confidential VMs.", + "diskEncryptionSet": "diskEncryptionSet specifies the customer managed disk encryption set resource id for the managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and VMGuest blob.", + "securityEncryptionType": "securityEncryptionType specifies the encryption type of the managed disk. It is set to DiskWithVMGuestState to encrypt the managed disk along with the VMGuestState blob, and to VMGuestStateOnly to encrypt the VMGuestState blob only. When set to VMGuestStateOnly, the vTPM should be enabled. When set to DiskWithVMGuestState, both SecureBoot and vTPM should be enabled. If the above conditions are not fulfilled, the VM will not be created and the respective error will be returned. It can be set only for Confidential VMs. Confidential VMs are defined by their SecurityProfile.SecurityType being set to ConfidentialVM, the SecurityEncryptionType of their OS disk being set to one of the allowed values and by enabling the respective SecurityProfile.UEFISettings of the VM (i.e. vTPM and SecureBoot), depending on the selected SecurityEncryptionType. For further details on Azure Confidential VMs, please refer to the respective documentation: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview", +} + +func (VMDiskSecurityProfile) SwaggerDoc() map[string]string { + return map_VMDiskSecurityProfile +} + var map_GCPDisk = map[string]string{ "": "GCPDisk describes disks for GCP.", "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", diff --git a/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml index 7e5ebd227..360d60053 100644 --- a/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml @@ -145,10 +145,10 @@ spec: - reencrypt - passthrough x-kubernetes-validations: - - rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' - message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' - rule: '!(has(self.certificate) && has(self.externalCertificate))' message: cannot have both spec.tls.certificate and spec.tls.externalCertificate + - rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' to: description: to is an object the route should use as the primary backend. 
Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. type: object diff --git a/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml index d9b016564..fd6678f44 100644 --- a/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml @@ -145,10 +145,10 @@ spec: - reencrypt - passthrough x-kubernetes-validations: - - rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' - message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' - rule: '!(has(self.certificate) && has(self.externalCertificate))' message: cannot have both spec.tls.certificate and spec.tls.externalCertificate + - rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' to: description: to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. type: object diff --git a/vendor/modules.txt b/vendor/modules.txt index 9bd1c6f97..94d56feab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -572,7 +572,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/openshift/api v0.0.0-20230703162140-6e9853e4c905 +# github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 ## explicit; go 1.20 github.com/openshift/api github.com/openshift/api/apiserver From 772a8b0d485e316e90fda5befddcba7a460db20e Mon Sep 17 00:00:00 2001 From: Marc Sluiter Date: Wed, 12 Jul 2023 00:14:15 +0200 Subject: [PATCH 3/4] Update openshift/library for feature gate accessor Signed-off-by: Marc Sluiter --- go.mod | 3 +- go.sum | 6 +- .../pkg/controller/factory/base_controller.go | 276 ++++ .../controller/factory/controller_context.go | 116 ++ .../pkg/controller/factory/eventfilters.go | 26 + .../pkg/controller/factory/factory.go | 309 ++++ .../pkg/controller/factory/interfaces.go | 47 + .../openshift/library-go/pkg/crypto/OWNERS | 4 + .../openshift/library-go/pkg/crypto/crypto.go | 1252 +++++++++++++++++ .../library-go/pkg/crypto/rotation.go | 20 + .../pkg/operator/condition/condition.go | 72 + .../config_observer_controller.go | 284 ++++ .../featuregates/featuregate.go | 47 + .../hardcoded_featuregate_reader.go | 78 + .../featuregates/observe_featuregates.go | 118 ++ .../featuregates/simple_featuregate_reader.go | 318 +++++ .../operator/configobserver/unstructured.go | 45 + .../operator/management/management_state.go | 77 + .../resource/resourceapply/storage.go | 18 + .../operator/resourcesynccontroller/core.go | 67 + .../resourcesynccontroller/interfaces.go | 41 + .../resourcesync_controller.go | 340 +++++ vendor/github.com/robfig/cron/.gitignore | 22 + vendor/github.com/robfig/cron/.travis.yml | 1 + vendor/github.com/robfig/cron/LICENSE 
| 21 + vendor/github.com/robfig/cron/README.md | 6 + .../github.com/robfig/cron/constantdelay.go | 27 + vendor/github.com/robfig/cron/cron.go | 259 ++++ vendor/github.com/robfig/cron/doc.go | 129 ++ vendor/github.com/robfig/cron/parser.go | 380 +++++ vendor/github.com/robfig/cron/spec.go | 158 +++ .../apiserver/pkg/authentication/user/doc.go | 19 + .../apiserver/pkg/authentication/user/user.go | 84 ++ vendor/modules.txt | 13 +- 34 files changed, 4679 insertions(+), 4 deletions(-) create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/crypto.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/rotation.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go create mode 100644 vendor/github.com/robfig/cron/.gitignore create mode 100644 vendor/github.com/robfig/cron/.travis.yml create mode 100644 vendor/github.com/robfig/cron/LICENSE create mode 100644 vendor/github.com/robfig/cron/README.md create mode 100644 vendor/github.com/robfig/cron/constantdelay.go create mode 100644 vendor/github.com/robfig/cron/cron.go create mode 100644 vendor/github.com/robfig/cron/doc.go create mode 100644 vendor/github.com/robfig/cron/parser.go create mode 100644 vendor/github.com/robfig/cron/spec.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go diff --git a/go.mod b/go.mod index 46bc0859c..6a808bda8 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/onsi/gomega v1.27.7 github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb - github.com/openshift/library-go v0.0.0-20230508110756-9b7abe2c9cbf + github.com/openshift/library-go v0.0.0-20230706195801-561433066966 
github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b github.com/prometheus/client_golang v1.15.1 github.com/spf13/cobra v1.6.1 @@ -189,6 +189,7 @@ require ( github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rivo/uniseg v0.4.2 // indirect + github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryancurrah/gomodguard v1.3.0 // indirect github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect diff --git a/go.sum b/go.sum index 9d4674efe..474de74cb 100644 --- a/go.sum +++ b/go.sum @@ -635,8 +635,8 @@ github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64 h1:j7LIIr4Vrdy4Dpd4b github.com/openshift/api v0.0.0-20230711095040-ca06f4a23b64/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs= github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb h1:Nij5OnaECrkmcRQMAE9LMbQXPo95aqFnf+12B7SyFVI= github.com/openshift/client-go v0.0.0-20230503144108-75015d2347cb/go.mod h1:Rhb3moCqeiTuGHAbXBOlwPubUMlOZEkrEWTRjIF3jzs= -github.com/openshift/library-go v0.0.0-20230508110756-9b7abe2c9cbf h1:ZpFAN2qprgp7jEhGPrOAwP8mmuYC9BRYzvDefg+k4GM= -github.com/openshift/library-go v0.0.0-20230508110756-9b7abe2c9cbf/go.mod h1:PJVatR/oS/EaFciwylyAr9hORSqQHrC+5bXf4L0wsBY= +github.com/openshift/library-go v0.0.0-20230706195801-561433066966 h1:qJZaVzxJy7s6Cp1908rkSR64YCrpiKMZAkfYhsZPPCw= +github.com/openshift/library-go v0.0.0-20230706195801-561433066966/go.mod h1:PegtilvJPBJXjJG3AV8uL1a0SAnBr6K67ShNiWVb40M= github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b h1:Q1q8w51pAZdx6LEkaYdSbUaaEOHXTyTXLhtGgIiKaiA= github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b/go.mod h1:iVyukRkam5JZa8AnjYf+/G3rk7JI1+M6GsU0sq0B9NA= github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= @@ -700,6 +700,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uY github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go new file mode 100644 index 000000000..722d95d5e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go @@ -0,0 +1,276 @@ +package factory + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/robfig/cron" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + operatorv1 "github.com/openshift/api/operator/v1" + 
"github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/v1helpers" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially. +// This can be also done by re-adding the key to queue, but this is cheaper and more convenient. +var SyntheticRequeueError = errors.New("synthetic requeue request") + +var defaultCacheSyncTimeout = 10 * time.Minute + +// baseController represents generic Kubernetes controller boiler-plate +type baseController struct { + name string + cachesToSync []cache.InformerSynced + sync func(ctx context.Context, controllerContext SyncContext) error + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncEvery time.Duration + resyncSchedules []cron.Schedule + postStartHooks []PostStartHook + cacheSyncTimeout time.Duration +} + +var _ Controller = &baseController{} + +func (c baseController) Name() string { + return c.name +} + +type scheduledJob struct { + queue workqueue.RateLimitingInterface + name string +} + +func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job { + return &scheduledJob{ + queue: queue, + name: name, + } +} + +func (s *scheduledJob) Run() { + klog.V(4).Infof("Triggering scheduled %q controller run", s.name) + s.queue.Add(DefaultQueueKey) +} + +func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { + return fmt.Errorf("unable to sync caches for %s", controllerName) + } + + klog.Infof("Caches are synced for %s ", controllerName) + + return nil +} + +func (c *baseController) Run(ctx context.Context, workers int) { + // HandleCrash recovers panics + defer utilruntime.HandleCrash(c.degradedPanicHandler) + + // give caches 10 minutes to sync + cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout) + defer cacheSyncCancel() + err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...) + if err != nil { + select { + case <-ctx.Done(): + // Exit gracefully because the controller was requested to stop. + return + default: + // If caches did not sync after 10 minutes, it has taken oddly long and + // we should provide feedback. Since the control loops will never start, + // it is safer to exit with a good message than to continue with a dead loop. + // TODO: Consider making this behavior configurable. 
+ klog.Exit(err) + } + } + + var workerWg sync.WaitGroup + defer func() { + defer klog.Infof("All %s workers have been terminated", c.name) + workerWg.Wait() + }() + + // queueContext is used to track and initiate queue shutdown + queueContext, queueContextCancel := context.WithCancel(context.TODO()) + + for i := 1; i <= workers; i++ { + klog.Infof("Starting #%d worker of %s controller ...", i, c.name) + workerWg.Add(1) + go func() { + defer func() { + klog.Infof("Shutting down worker of %s controller ...", c.name) + workerWg.Done() + }() + c.runWorker(queueContext) + }() + } + + // if scheduled run is requested, run the cron scheduler + if c.resyncSchedules != nil { + scheduler := cron.New() + for _, s := range c.resyncSchedules { + scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue())) + } + scheduler.Start() + defer scheduler.Stop() + } + + // runPeriodicalResync is independent from queue + if c.resyncEvery > 0 { + workerWg.Add(1) + if c.resyncEvery < 60*time.Second { + // Warn about too fast resyncs as they might drain the operators QPS. + // This event is cheap as it is only emitted on operator startup. + c.syncContext.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", c.name, c.resyncEvery) + } + go func() { + defer workerWg.Done() + wait.UntilWithContext(ctx, func(ctx context.Context) { c.syncContext.Queue().Add(DefaultQueueKey) }, c.resyncEvery) + }() + } + + // run post-start hooks (custom triggers, etc.) + if len(c.postStartHooks) > 0 { + var hookWg sync.WaitGroup + defer func() { + hookWg.Wait() // wait for the post-start hooks + klog.Infof("All %s post start hooks have been terminated", c.name) + }() + for i := range c.postStartHooks { + hookWg.Add(1) + go func(index int) { + defer hookWg.Done() + if err := c.postStartHooks[index](ctx, c.syncContext); err != nil { + klog.Warningf("%s controller post start hook error: %v", c.name, err) + } + }(i) + } + } + + // Handle controller shutdown + + <-ctx.Done() // wait for controller context to be cancelled + c.syncContext.Queue().ShutDown() // shutdown the controller queue first + queueContextCancel() // cancel the queue context, which tell workers to initiate shutdown + + // Wait for all workers to finish their job. + // at this point the Run() can hang and caller have to implement the logic that will kill + // this controller (SIGKILL). + klog.Infof("Shutting down %s ...", c.name) +} + +func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error { + return c.sync(ctx, syncCtx) +} + +// runWorker runs a single worker +// The worker is asked to terminate when the passed context is cancelled and is given terminationGraceDuration time +// to complete its shutdown. +func (c *baseController) runWorker(queueCtx context.Context) { + wait.UntilWithContext( + queueCtx, + func(queueCtx context.Context) { + defer utilruntime.HandleCrash(c.degradedPanicHandler) + for { + select { + case <-queueCtx.Done(): + return + default: + c.processNextWorkItem(queueCtx) + } + } + }, + 1*time.Second) +} + +// reconcile wraps the sync() call and if operator client is set, it handle the degraded condition if sync() returns an error. +func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error { + err := c.sync(ctx, syncCtx) + degradedErr := c.reportDegraded(ctx, err) + if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() { + // The operator tolerates missing CR, therefore don't report it up. 
+ return err + } + return degradedErr +} + +// degradedPanicHandler will go degraded on failures, then we should catch potential panics and covert them into bad status. +func (c *baseController) degradedPanicHandler(panicVal interface{}) { + if c.syncDegradedClient == nil { + // if we don't have a client for reporting degraded condition, then let the existing panic handler do the work + return + } + _ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal)) +} + +// reportDegraded updates status with an indication of degraded-ness +func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error { + if c.syncDegradedClient == nil { + return reportedError + } + if reportedError != nil { + _, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{ + Type: c.name + "Degraded", + Status: operatorv1.ConditionTrue, + Reason: "SyncError", + Message: reportedError.Error(), + })) + if updateErr != nil { + klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr) + } + return reportedError + } + _, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient, + v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{ + Type: c.name + "Degraded", + Status: operatorv1.ConditionFalse, + Reason: "AsExpected", + })) + return updateErr +} + +func (c *baseController) processNextWorkItem(queueCtx context.Context) { + key, quit := c.syncContext.Queue().Get() + if quit { + return + } + defer c.syncContext.Queue().Done(key) + + syncCtx := c.syncContext.(syncContext) + var ok bool + syncCtx.queueKey, ok = key.(string) + if !ok { + utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key)) + return + } + + if err := c.reconcile(queueCtx, syncCtx); err != nil { + if err == SyntheticRequeueError { + // logging this helps detecting wedged controllers with missing pre-requirements + klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key) + } else { + if klog.V(4).Enabled() || key != "key" { + utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err)) + } else { + utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err)) + } + } + c.syncContext.Queue().AddRateLimited(key) + return + } + + c.syncContext.Queue().Forget(key) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go new file mode 100644 index 000000000..3c585e40a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go @@ -0,0 +1,116 @@ +package factory + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "github.com/openshift/library-go/pkg/operator/events" +) + +// syncContext implements SyncContext and provide user access to queue and object that caused +// the sync to be triggered. +type syncContext struct { + eventRecorder events.Recorder + queue workqueue.RateLimitingInterface + queueKey string +} + +var _ SyncContext = syncContext{} + +// NewSyncContext gives new sync context. 
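+// The factory normally constructs the context for you; building one directly is
+// mostly useful in tests. An illustrative sketch (recorder is an assumed,
+// caller-provided events.Recorder, not part of this change):
+//
+//	syncCtx := NewSyncContext("example-controller", recorder)
+//	syncCtx.Queue().Add(DefaultQueueKey)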
+func NewSyncContext(name string, recorder events.Recorder) SyncContext { + return syncContext{ + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)), + } +} + +func (c syncContext) Queue() workqueue.RateLimitingInterface { + return c.queue +} + +func (c syncContext) QueueKey() string { + return c.queueKey +} + +func (c syncContext) Recorder() events.Recorder { + return c.eventRecorder +} + +// eventHandler provides default event handler that is added to an informers passed to controller factory. +func (c syncContext) eventHandler(queueKeysFunc ObjectQueueKeysFunc, filter EventFilterFunc) cache.ResourceEventHandler { + resourceEventHandler := cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + runtimeObj, ok := obj.(runtime.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj)) + return + } + c.enqueueKeys(queueKeysFunc(runtimeObj)...) + }, + UpdateFunc: func(old, new interface{}) { + runtimeObj, ok := new.(runtime.Object) + if !ok { + utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", runtimeObj)) + return + } + c.enqueueKeys(queueKeysFunc(runtimeObj)...) + }, + DeleteFunc: func(obj interface{}) { + runtimeObj, ok := obj.(runtime.Object) + if !ok { + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + c.enqueueKeys(queueKeysFunc(tombstone.Obj.(runtime.Object))...) + + return + } + utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", runtimeObj)) + return + } + c.enqueueKeys(queueKeysFunc(runtimeObj)...) + }, + } + if filter == nil { + return resourceEventHandler + } + return cache.FilteringResourceEventHandler{ + FilterFunc: filter, + Handler: resourceEventHandler, + } +} + +func (c syncContext) enqueueKeys(keys ...string) { + for _, qKey := range keys { + c.queue.Add(qKey) + } +} + +// namespaceChecker returns a function which returns true if an inpuut obj +// (or its tombstone) is a namespace and it matches a name of any namespaces +// that we are interested in +func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool { + interestingNamespacesSet := sets.NewString(interestingNamespaces...) + + return func(obj interface{}) bool { + ns, ok := obj.(*corev1.Namespace) + if ok { + return interestingNamespacesSet.Has(ns.Name) + } + + // the object might be getting deleted + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if ok { + if ns, ok := tombstone.Obj.(*corev1.Namespace); ok { + return interestingNamespacesSet.Has(ns.Name) + } + } + return false + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go new file mode 100644 index 000000000..b70da9548 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go @@ -0,0 +1,26 @@ +package factory + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +func ObjectNameToKey(obj runtime.Object) string { + metaObj, ok := obj.(metav1.ObjectMetaAccessor) + if !ok { + return "" + } + return metaObj.GetObjectMeta().GetName() +} + +func NamesFilter(names ...string) EventFilterFunc { + nameSet := sets.NewString(names...) 
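+	// Membership in nameSet decides whether an observed object's metadata name is
+	// allowed to trigger Sync(); everything else is filtered out.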
+ return func(obj interface{}) bool { + metaObj, ok := obj.(metav1.ObjectMetaAccessor) + if !ok { + return false + } + return nameSet.Has(metaObj.GetObjectMeta().GetName()) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go new file mode 100644 index 000000000..8f910f672 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go @@ -0,0 +1,309 @@ +package factory + +import ( + "context" + "fmt" + "time" + + "github.com/robfig/cron" + "k8s.io/apimachinery/pkg/runtime" + errorutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// DefaultQueueKey is the queue key used for string trigger based controllers. +const DefaultQueueKey = "key" + +// DefaultQueueKeysFunc returns a slice with a single element - the DefaultQueueKey +func DefaultQueueKeysFunc(_ runtime.Object) []string { + return []string{DefaultQueueKey} +} + +// Factory is generator that generate standard Kubernetes controllers. +// Factory is really generic and should be only used for simple controllers that does not require special stuff.. +type Factory struct { + sync SyncFunc + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncInterval time.Duration + resyncSchedules []string + informers []filteredInformers + informerQueueKeys []informersWithQueueKey + bareInformers []Informer + postStartHooks []PostStartHook + namespaceInformers []*namespaceInformer + cachesToSync []cache.InformerSynced + interestingNamespaces sets.String +} + +// Informer represents any structure that allow to register event handlers and informs if caches are synced. +// Any SharedInformer will comply. +type Informer interface { + AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) + HasSynced() bool +} + +type namespaceInformer struct { + informer Informer + nsFilter EventFilterFunc +} + +type informersWithQueueKey struct { + informers []Informer + filter EventFilterFunc + queueKeyFn ObjectQueueKeysFunc +} + +type filteredInformers struct { + informers []Informer + filter EventFilterFunc +} + +// PostStartHook specify a function that will run after controller is started. +// The context is cancelled when the controller is asked to shutdown and the post start hook should terminate as well. +// The syncContext allow access to controller queue and event recorder. +type PostStartHook func(ctx context.Context, syncContext SyncContext) error + +// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it. +// This can extract the "namespace/name" if you need to or just return "key" if you building controller that only use string +// triggers. +// DEPRECATED: use ObjectQueueKeysFunc instead +type ObjectQueueKeyFunc func(runtime.Object) string + +// ObjectQueueKeysFunc is used to make a string work queue keys out of the runtime object that is passed to it. +// This can extract the "namespace/name" if you need to or just return "key" if you building controller that only use string +// triggers. 
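+//
+// A sketch of a typical implementation (illustrative only, assuming the object
+// carries standard Kubernetes metadata):
+//
+//	func namespaceNameKeys(obj runtime.Object) []string {
+//		accessor, ok := obj.(metav1.ObjectMetaAccessor)
+//		if !ok {
+//			return nil
+//		}
+//		meta := accessor.GetObjectMeta()
+//		return []string{meta.GetNamespace() + "/" + meta.GetName()}
+//	}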
+type ObjectQueueKeysFunc func(runtime.Object) []string + +// EventFilterFunc is used to filter informer events to prevent Sync() from being called +type EventFilterFunc func(obj interface{}) bool + +// New return new factory instance. +func New() *Factory { + return &Factory{} +} + +// Sync is used to set the controller synchronization function. This function is the core of the controller and is +// usually hold the main controller logic. +func (f *Factory) WithSync(syncFn SyncFunc) *Factory { + f.sync = syncFn + return f +} + +// WithInformers is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +func (f *Factory) WithInformers(informers ...Informer) *Factory { + f.WithFilteredEventsInformers(nil, informers...) + return f +} + +// WithFilteredEventsInformers is used to register event handlers and get the caches synchronized functions. +// Pass the informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory { + f.informers = append(f.informers, filteredInformers{ + informers: informers, + filter: filter, + }) + return f +} + +// WithBareInformers allow to register informer that already has custom event handlers registered and no additional +// event handlers will be added to this informer. +// The controller will wait for the cache of this informer to be synced. +// The existing event handlers will have to respect the queue key function or the sync() implementation will have to +// count with custom queue keys. +func (f *Factory) WithBareInformers(informers ...Informer) *Factory { + f.bareInformers = append(f.bareInformers, informers...) + return f +} + +// WithInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: func(o runtime.Object) []string { + return []string{queueKeyFn(o)} + }, + }) + return f +} + +// WithFilteredEventsInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +// Pass filter to filter out events that should not trigger Sync() call. 
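+//
+// Illustrative wiring only; sync, secretInformer and recorder are assumed to
+// exist on the caller side. This reacts only to the object named "cluster" and
+// queues it under its own name:
+//
+//	New().
+//		WithSync(sync).
+//		WithFilteredEventsInformersQueueKeyFunc(ObjectNameToKey, NamesFilter("cluster"), secretInformer).
+//		ToController("ExampleController", recorder)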
+func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: func(o runtime.Object) []string { + return []string{queueKeyFn(o)} + }, + }) + return f +} + +// WithInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +func (f *Factory) WithInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithFilteredEventsInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithPostStartHooks allows to register functions that will run asynchronously after the controller is started via Run command. +func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory { + f.postStartHooks = append(f.postStartHooks, hooks...) + return f +} + +// WithNamespaceInformer is used to register event handlers and get the caches synchronized functions. +// The sync function will only trigger when the object observed by this informer is a namespace and its name matches the interestingNamespaces. +// Do not use this to register non-namespace informers. +func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory { + f.namespaceInformers = append(f.namespaceInformers, &namespaceInformer{ + informer: informer, + nsFilter: namespaceChecker(interestingNamespaces), + }) + return f +} + +// ResyncEvery will cause the Sync() function to be called periodically, regardless of informers. +// This is useful when you want to refresh every N minutes or you fear that your informers can be stucked. +// If this is not called, no periodical resync will happen. +// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself. +// +// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects. +func (f *Factory) ResyncEvery(interval time.Duration) *Factory { + f.resyncInterval = interval + return f +} + +// ResyncSchedule allows to supply a Cron syntax schedule that will be used to schedule the sync() call runs. +// This allows more fine-tuned controller scheduling than ResyncEvery. 
+// Examples: +// +// factory.New().ResyncSchedule("@every 1s").ToController() // Every second +// factory.New().ResyncSchedule("@hourly").ToController() // Every hour +// factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour +// +// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself. +// +// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects. +func (f *Factory) ResyncSchedule(schedules ...string) *Factory { + f.resyncSchedules = append(f.resyncSchedules, schedules...) + return f +} + +// WithSyncContext allows to specify custom, existing sync context for this factory. +// This is useful during unit testing where you can override the default event recorder or mock the runtime objects. +// If this function not called, a SyncContext is created by the factory automatically. +func (f *Factory) WithSyncContext(ctx SyncContext) *Factory { + f.syncContext = ctx + return f +} + +// WithSyncDegradedOnError encapsulate the controller sync() function, so when this function return an error, the operator client +// is used to set the degraded condition to (eg. "ControllerFooDegraded"). The degraded condition name is set based on the controller name. +func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory { + f.syncDegradedClient = operatorClient + return f +} + +// Controller produce a runnable controller. +func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller { + if f.sync == nil { + panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name)) + } + + var ctx SyncContext + if f.syncContext != nil { + ctx = f.syncContext + } else { + ctx = NewSyncContext(name, eventRecorder) + } + + var cronSchedules []cron.Schedule + if len(f.resyncSchedules) > 0 { + var errors []error + for _, schedule := range f.resyncSchedules { + if s, err := cron.ParseStandard(schedule); err != nil { + errors = append(errors, err) + } else { + cronSchedules = append(cronSchedules, s) + } + } + if err := errorutil.NewAggregate(errors); err != nil { + panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err)) + } + } + + c := &baseController{ + name: name, + syncDegradedClient: f.syncDegradedClient, + sync: f.sync, + resyncEvery: f.resyncInterval, + resyncSchedules: cronSchedules, + cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), + syncContext: ctx, + postStartHooks: f.postStartHooks, + cacheSyncTimeout: defaultCacheSyncTimeout, + } + + for i := range f.informerQueueKeys { + for d := range f.informerQueueKeys[i].informers { + informer := f.informerQueueKeys[i].informers[d] + queueKeyFn := f.informerQueueKeys[i].queueKeyFn + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.informers { + for d := range f.informers[i].informers { + informer := f.informers[i].informers[d] + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.bareInformers { + c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced) + } + + for i := range f.namespaceInformers { + 
f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.namespaceInformers[i].nsFilter)) + c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced) + } + + return c +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go new file mode 100644 index 000000000..0ef98c670 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go @@ -0,0 +1,47 @@ +package factory + +import ( + "context" + + "k8s.io/client-go/util/workqueue" + + "github.com/openshift/library-go/pkg/operator/events" +) + +// Controller interface represents a runnable Kubernetes controller. +// Cancelling the syncContext passed will cause the controller to shutdown. +// Number of workers determine how much parallel the job processing should be. +type Controller interface { + // Run runs the controller and blocks until the controller is finished. + // Number of workers can be specified via workers parameter. + // This function will return when all internal loops are finished. + // Note that having more than one worker usually means handing parallelization of Sync(). + Run(ctx context.Context, workers int) + + // Sync contain the main controller logic. + // This should not be called directly, but can be used in unit tests to exercise the sync. + Sync(ctx context.Context, controllerContext SyncContext) error + + // Name returns the controller name string. + Name() string +} + +// SyncContext interface represents a context given to the Sync() function where the main controller logic happen. +// SyncContext exposes controller name and give user access to the queue (for manual requeue). +// SyncContext also provides metadata about object that informers observed as changed. +type SyncContext interface { + // Queue gives access to controller queue. This can be used for manual requeue, although if a Sync() function return + // an error, the object is automatically re-queued. Use with caution. + Queue() workqueue.RateLimitingInterface + + // QueueKey represents the queue key passed to the Sync function. + QueueKey() string + + // Recorder provide access to event recorder. + Recorder() events.Recorder +} + +// SyncFunc is a function that contain main controller logic. +// The syncContext.syncContext passed is the main controller syncContext, when cancelled it means the controller is being shut down. +// The syncContext provides access to controller name, queue and event recorder. 
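+//
+// An illustrative end-to-end sketch (not part of the upstream change);
+// configMapInformer and recorder are assumed to come from the caller's informer
+// factory and event recorder:
+//
+//	controller := New().
+//		WithSync(func(ctx context.Context, syncCtx SyncContext) error {
+//			// reconcile whatever syncCtx.QueueKey() refers to
+//			return nil
+//		}).
+//		WithInformers(configMapInformer.Informer()).
+//		ResyncEvery(10 * time.Minute).
+//		ToController("ExampleController", recorder)
+//	go controller.Run(ctx, 1)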
+type SyncFunc func(ctx context.Context, controllerContext SyncContext) error diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS new file mode 100644 index 000000000..4d4ce5ab9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - stlaz +approvers: + - stlaz diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go new file mode 100644 index 000000000..554112c49 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -0,0 +1,1252 @@ +package crypto + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + mathrand "math/rand" + "net" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/util/cert" +) + +// TLS versions that are known to golang. Go 1.13 adds support for +// TLS 1.3 that's opt-out with a build flag. +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLS versions that are enabled. +var supportedVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSVersionToNameOrDie given a tls version as an int, return its readable name +func TLSVersionToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range versions { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} +func TLSVersionOrDie(versionName string) uint16 { + version, err := TLSVersion(versionName) + if err != nil { + panic(err) + } + return version +} + +// TLS versions that are known to golang, but may not necessarily be enabled. +func GolangTLSVersions() []string { + supported := []string{} + for k := range versions { + supported = append(supported, k) + } + sort.Strings(supported) + return supported +} + +// Returns the build enabled TLS versions. 
+func ValidTLSVersions() []string { + validVersions := []string{} + for k := range supportedVersions { + validVersions = append(validVersions, k) + } + sort.Strings(validVersions) + return validVersions +} +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} + +// ciphersTLS13 copies golang 1.13 implementation, where TLS1.3 suites are not +// configurable (cipherSuites field is ignored for TLS1.3 flows and all of the +// below three - and none other - are used) +var ciphersTLS13 = map[string]uint16{ + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, +} + +var ciphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, +} + +// openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names +// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +var openSSLToIANACiphersMap = map[string]string{ + // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows + // "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + // "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + // "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + + // TLS 1.2 + "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B + "ECDHE-RSA-AES128-GCM-SHA256": 
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2F + "ECDHE-ECDSA-AES256-GCM-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x2C + "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30 + "ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA9 + "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8 + "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 + "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 + "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C + "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D + "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + + // TLS 1 + "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 + "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 + "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A + "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 + + // SSL 3 + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A +} + +// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names +func CipherSuitesToNamesOrDie(intVals []uint16) []string { + ret := []string{} + for _, intVal := range intVals { + ret = append(ret, CipherSuiteToNameOrDie(intVal)) + } + + return ret +} + +// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name +func CipherSuiteToNameOrDie(intVal uint16) string { + // The following suite ids appear twice in the cipher map (with + // and without the _SHA256 suffix) for the purposes of backwards + // compatibility. Always return the current rather than the legacy + // name. 
+ switch intVal { + case tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + case tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + } + + matches := []string{} + for key, version := range ciphers { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func CipherSuite(cipherName string) (uint16, error) { + if cipher, ok := ciphers[cipherName]; ok { + return cipher, nil + } + + if _, ok := ciphersTLS13[cipherName]; ok { + return 0, fmt.Errorf("all golang TLSv1.3 ciphers are always used for TLSv1.3 flows") + } + + return 0, fmt.Errorf("unknown cipher name %q", cipherName) +} + +func CipherSuitesOrDie(cipherNames []string) []uint16 { + if len(cipherNames) == 0 { + return DefaultCiphers() + } + cipherValues := []uint16{} + for _, cipherName := range cipherNames { + cipher, err := CipherSuite(cipherName) + if err != nil { + panic(err) + } + cipherValues = append(cipherValues, cipher) + } + return cipherValues +} +func ValidCipherSuites() []string { + validCipherSuites := []string{} + for k := range ciphers { + validCipherSuites = append(validCipherSuites, k) + } + sort.Strings(validCipherSuites) + return validCipherSuites +} +func DefaultCiphers() []uint16 { + // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher + // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for + // perfect forward secrecy. Servers may provide additional cipher + // suites for backwards compatibility with HTTP/1.1 clients. + // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A + // (TLS 1.2 Cipher Suite Black List). + return []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2 + // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index + // because it comes after ciphers forbidden by the http/2 spec + // tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + } +} + +// SecureTLSConfig enforces the default minimum security settings for the cluster. 
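+//
+// Illustrative usage only (certs is an assumed, already-loaded certificate list):
+//
+//	srv := &http.Server{
+//		Addr:      ":8443",
+//		TLSConfig: SecureTLSConfig(&tls.Config{Certificates: certs}),
+//	}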
+func SecureTLSConfig(config *tls.Config) *tls.Config { + if config.MinVersion == 0 { + config.MinVersion = DefaultTLSVersion() + } + + config.PreferServerCipherSuites = true + if len(config.CipherSuites) == 0 { + config.CipherSuites = DefaultCiphers() + } + return config +} + +// OpenSSLToIANACipherSuites maps input OpenSSL Cipher Suite names to their +// IANA counterparts. +// Unknown ciphers are left out. +func OpenSSLToIANACipherSuites(ciphers []string) []string { + ianaCiphers := make([]string, 0, len(ciphers)) + + for _, c := range ciphers { + ianaCipher, found := openSSLToIANACiphersMap[c] + if found { + ianaCiphers = append(ianaCiphers, ianaCipher) + } + } + + return ianaCiphers +} + +type TLSCertificateConfig struct { + Certs []*x509.Certificate + Key crypto.PrivateKey +} + +type TLSCARoots struct { + Roots []*x509.Certificate +} + +func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error { + // ensure parent dir + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return err + } + certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return err + } + keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + + if err := writeCertificates(certFileWriter, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFileWriter, c.Key); err != nil { + return err + } + + if err := certFileWriter.Close(); err != nil { + return err + } + if err := keyFileWriter.Close(); err != nil { + return err + } + + return nil +} + +func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error { + if err := writeCertificates(certFile, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFile, c.Key); err != nil { + return err + } + return nil +} + +func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) { + certBytes, err := EncodeCertificates(c.Certs...) 
+ if err != nil { + return nil, nil, err + } + keyBytes, err := encodeKey(c.Key) + if err != nil { + return nil, nil, err + } + + return certBytes, keyBytes, nil +} + +func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) { + if len(certFile) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyFile) == 0 { + return nil, errors.New("keyFile missing") + } + + certPEMBlock, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + certs, err := cert.ParseCertsPEM(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %s", certFile, err) + } + + keyPEMBlock, err := os.ReadFile(keyFile) + if err != nil { + return nil, err + } + keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) { + if len(certBytes) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyBytes) == 0 { + return nil, errors.New("keyFile missing") + } + + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + return nil, fmt.Errorf("Error reading cert: %s", err) + } + + keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +const ( + DefaultCertificateLifetimeInDays = 365 * 2 // 2 years + DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years + + // Default keys are 2048 bits + keyBits = 2048 +) + +type CA struct { + Config *TLSCertificateConfig + + SerialGenerator SerialGenerator +} + +// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe. +type SerialGenerator interface { + Next(template *x509.Certificate) (int64, error) +} + +// SerialFileGenerator returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. +type SerialFileGenerator struct { + SerialFile string + + // lock guards access to the Serial field + lock sync.Mutex + Serial int64 +} + +func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) { + // read serial file, it must already exist + serial, err := fileToSerial(serialFile) + if err != nil { + return nil, err + } + + generator := &SerialFileGenerator{ + Serial: serial, + SerialFile: serialFile, + } + + // 0 is unused and 1 is reserved for the CA itself + // Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+ + // meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative) + if generator.Serial < 1 { + // fake a call to Next so the file stays in sync and Serial is incremented + if _, err := generator.Next(&x509.Certificate{}); err != nil { + return nil, err + } + } + + return generator, nil +} + +// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. 
+func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + // do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file + serial, err := fileToSerial(s.SerialFile) + if err != nil { + return 0, err + } + if serial != s.Serial { + return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial) + } + + next := s.Serial + 1 + s.Serial = next + + // Output in hex, padded to multiples of two characters for OpenSSL's sake + serialText := fmt.Sprintf("%X", next) + if len(serialText)%2 == 1 { + serialText = "0" + serialText + } + // always add a newline at the end to have a valid file + serialText += "\n" + + if err := os.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil { + return 0, err + } + return next, nil +} + +func fileToSerial(serialFile string) (int64, error) { + serialData, err := os.ReadFile(serialFile) + if err != nil { + return 0, err + } + + // read the file as a single hex number after stripping any whitespace + serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64) + if err != nil { + return 0, err + } + + if serial < 0 { + return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile) + } + + return serial, nil +} + +// RandomSerialGenerator returns a serial based on time.Now and the subject +type RandomSerialGenerator struct { +} + +func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) { + return randomSerialNumber(), nil +} + +// randomSerialNumber returns a random int64 serial number based on +// time.Now. It is defined separately from the generator interface so +// that the caller doesn't have to worry about an input template or +// error - these are unnecessary when creating a random serial. 
+func randomSerialNumber() int64 { + r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano())) + return r.Int63() +} + +// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error +// if serialFile is empty, a RandomSerialGenerator will be used +func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if ca, err := GetCA(certFile, keyFile, serialFile); err == nil { + return ca, false, err + } + ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays) + return ca, true, err +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func GetCA(certFile, keyFile, serialFile string) (*CA, error) { + caConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) { + caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes) + if err != nil { + return nil, err + } + + return &CA{ + SerialGenerator: &RandomSerialGenerator{}, + Config: caConfig, + }, nil +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile) + + caConfig, err := MakeSelfSignedCAConfig(name, expireDays) + if err != nil { + return nil, err + } + if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return MakeSelfSignedCAConfigForSubject(subject, expireDays) +} + +func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) { + var caLifetimeInDays = DefaultCACertificateLifetimeInDays + if expireDays > 0 { + caLifetimeInDays = expireDays + } + + if caLifetimeInDays > DefaultCACertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays) + } + + caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour + return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime) +} + +func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime) +} + +func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, caLifetime time.Duration) (*TLSCertificateConfig, error) { + // Create CA cert + rootcaPublicKey, rootcaPrivateKey, publicKeyHash, err := 
newKeyPairWithHash() + if err != nil { + return nil, err + } + // AuthorityKeyId and SubjectKeyId should match for a self-signed CA + authorityKeyId := publicKeyHash + subjectKeyId := publicKeyHash + rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, time.Now, authorityKeyId, subjectKeyId) + rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey) + if err != nil { + return nil, err + } + caConfig := &TLSCertificateConfig{ + Certs: []*x509.Certificate{rootcaCert}, + Key: rootcaPrivateKey, + } + return caConfig, nil +} + +func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) { + // Create CA cert + signerPublicKey, signerPrivateKey, publicKeyHash, err := newKeyPairWithHash() + if err != nil { + return nil, err + } + authorityKeyId := issuer.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now, authorityKeyId, subjectKeyId) + signerCert, err := issuer.signCertificate(signerTemplate, signerPublicKey) + if err != nil { + return nil, err + } + signerConfig := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...), + Key: signerPrivateKey, + } + return signerConfig, nil +} + +// EnsureSubCA returns a subCA signed by the `ca`, whether it was created +// (as opposed to pre-existing), and any error that might occur during the subCA +// creation. +// If serialFile is an empty string, a RandomSerialGenerator will be used. +func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if subCA, err := GetCA(certFile, keyFile, serialFile); err == nil { + return subCA, false, err + } + subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, expireDays) + return subCA, true, err +} + +// MakeAndWriteSubCA returns a new sub-CA configuration. New cert/key pair is generated +// while using this function. +// If serialFile is an empty string, a RandomSerialGenerator will be used. 
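+//
+// Illustrative usage only; the file names and signer names below are
+// assumptions, not upstream defaults:
+//
+//	rootCA, _, err := EnsureCA("ca.crt", "ca.key", "ca.serial.txt", "example-signer", 365*5)
+//	if err != nil {
+//		return err
+//	}
+//	subCA, err := rootCA.MakeAndWriteSubCA("sub-ca.crt", "sub-ca.key", "", "example-sub-signer", 365)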
+func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(4).Infof("Generating sub-CA certificate in %s, key in %s, serial in %s", certFile, keyFile, serialFile) + + subCAConfig, err := MakeCAConfigForDuration(name, time.Duration(expireDays)*time.Hour*24, ca) + if err != nil { + return nil, err + } + + if err := subCAConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + Config: subCAConfig, + SerialGenerator: serialGenerator, + }, nil +} + +func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetServerCert(certFile, keyFile, hostnames) + if err != nil { + certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays) + return certConfig, true, err + } + + return certConfig, false, nil +} + +func GetServerCert(certFile, keyFile string, hostnames sets.String) (*TLSCertificateConfig, error) { + server, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + cert := server.Certs[0] + ips, dns := IPAddressesDNSNames(hostnames.List()) + missingIps := ipsNotInSlice(ips, cert.IPAddresses) + missingDns := stringsNotInSlice(dns, cert.DNSNames) + if len(missingIps) == 0 && len(missingDns) == 0 { + klog.V(4).Infof("Found existing server certificate in %s", certFile) + return server, nil + } + + return nil, fmt.Errorf("Existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v).", certFile, missingDns, missingIps) +} + +func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile) + + server, err := ca.MakeServerCert(hostnames, expireDays) + if err != nil { + return nil, err + } + if err := server.WriteCertConfigFile(certFile, keyFile); err != nil { + return server, err + } + return server, nil +} + +// CertificateExtensionFunc is passed a certificate that it may extend, or return an error +// if the extension attempt failed. 
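+//
+// A sketch of a typical extension (illustrative only; the host names are
+// assumptions), adding an extra DNS SAN before the server certificate is signed:
+//
+//	withExtraSAN := func(cert *x509.Certificate) error {
+//		cert.DNSNames = append(cert.DNSNames, "alt.example.com")
+//		return nil
+//	}
+//	serverCert, err := ca.MakeServerCert(sets.NewString("svc.example.com"), 365, withExtraSAN)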
+type CertificateExtensionFunc func(*x509.Certificate) error + +func (ca *CA) MakeServerCert(hostnames sets.String, expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), expireDays, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) MakeServerCertForDuration(hostnames sets.String, lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), lifetime, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetClientCertificate(certFile, keyFile, u) + if err != nil { + certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays) + return certConfig, true, err // true indicates we wrote the files. 
+ } + return certConfig, false, nil +} + +func GetClientCertificate(certFile, keyFile string, u user.Info) (*TLSCertificateConfig, error) { + certConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + if subject := certConfig.Certs[0].Subject; subjectChanged(subject, userToSubject(u)) { + return nil, fmt.Errorf("existing client certificate in %s was issued for a different Subject (%s)", + certFile, subject) + } + + return certConfig, nil +} + +func subjectChanged(existing, expected pkix.Name) bool { + sort.Strings(existing.Organization) + sort.Strings(expected.Organization) + + return existing.CommonName != expected.CommonName || + existing.SerialNumber != expected.SerialNumber || + !reflect.DeepEqual(existing.Organization, expected.Organization) +} + +func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile) + // ensure parent dirs + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return nil, err + } + + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := newClientCertificateTemplate(userToSubject(u), expireDays, time.Now) + clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := encodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + if err = os.WriteFile(certFile, certData, os.FileMode(0644)); err != nil { + return nil, err + } + if err = os.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil { + return nil, err + } + + return GetTLSCertificateConfig(certFile, keyFile) +} + +func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) { + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := newClientCertificateTemplateForDuration(userToSubject(u), lifetime, time.Now) + clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := encodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + return GetTLSCertificateConfigFromBytes(certData, keyData) +} + +type sortedForDER []string + +func (s sortedForDER) Len() int { + return len(s) +} +func (s sortedForDER) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortedForDER) Less(i, j int) bool { + l1 := len(s[i]) + l2 := len(s[j]) + if l1 == l2 { + return s[i] < s[j] + } + return l1 < l2 +} + +func userToSubject(u user.Info) pkix.Name { + // Ok we are going to order groups in a peculiar way here to workaround a + // 2 bugs, 1 in golang (https://github.com/golang/go/issues/24254) which + // incorrectly encodes Multivalued RDNs and another in GNUTLS clients + // which are too picky (https://gitlab.com/gnutls/gnutls/issues/403) + // and try to "correct" this issue when reading client certs. + // + // This workaround should be killed once Golang's pkix module is fixed to + // generate a correct DER encoding. 
+ // + // The workaround relies on the fact that the first octect that differs + // between the encoding of two group RDNs will end up being the encoded + // length which is directly related to the group name's length. So we'll + // sort such that shortest names come first. + ugroups := u.GetGroups() + groups := make([]string, len(ugroups)) + copy(groups, ugroups) + sort.Sort(sortedForDER(groups)) + + return pkix.Name{ + CommonName: u.GetName(), + SerialNumber: u.GetUID(), + Organization: groups, + } +} + +func (ca *CA) signCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) { + // Increment and persist serial + serial, err := ca.SerialGenerator.Next(template) + if err != nil { + return nil, err + } + template.SerialNumber = big.NewInt(serial) + return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key) +} + +func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { + return newRSAKeyPair() +} + +func newKeyPairWithHash() (crypto.PublicKey, crypto.PrivateKey, []byte, error) { + publicKey, privateKey, err := newRSAKeyPair() + var publicKeyHash []byte + if err == nil { + hash := sha1.New() + hash.Write(publicKey.N.Bytes()) + publicKeyHash = hash.Sum(nil) + } + return publicKey, privateKey, publicKeyHash, err +} + +func newRSAKeyPair() (*rsa.PublicKey, *rsa.PrivateKey, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + return nil, nil, err + } + return &privateKey.PublicKey, privateKey, nil +} + +// Can be used for CA or intermediate signing certs +func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(caLifetime), + + // Specify a random serial number to avoid the same issuer+serial + // number referring to different certs in a chain of trust if the + // signing certificate is ever rotated. 
+ SerialNumber: big.NewInt(randomSerialNumber()), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId) +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + template := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } + + template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts) + + return template +} + +func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) { + ips := []net.IP{} + dns := []string{} + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + ips = append(ips, ip) + } else { + dns = append(dns, host) + } + } + + // Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and unnamed other libraries + // Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox + // See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766 + for _, ip := range ips { + dns = append(dns, ip.String()) + } + + return ips, dns +} + +func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("Could not read any certificates") + } + return certs, nil +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func newClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newClientCertificateTemplateForDuration(subject, lifetime, currentTime) +} + +// Can be 
used as a certificate in http.Transport TLSClientConfig +func newClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } +} + +func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) { + defaultLifetimeInYears := defaultLifetimeInDays / 365 + fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears) + fmt.Fprintln(os.Stderr, "WARNING: By security reasons it is strongly recommended to change this period and make it smaller!") +} + +func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) { + derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey) + if err != nil { + return nil, err + } + certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, err + } + if len(certs) != 1 { + return nil, errors.New("Expected a single certificate") + } + return certs[0], nil +} + +func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { + b := bytes.Buffer{} + for _, cert := range certs { + if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { + return []byte{}, err + } + } + return b.Bytes(), nil +} +func encodeKey(key crypto.PrivateKey) ([]byte, error) { + b := bytes.Buffer{} + switch key := key.(type) { + case *ecdsa.PrivateKey: + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return []byte{}, err + } + if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return b.Bytes(), err + } + case *rsa.PrivateKey: + if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil { + return []byte{}, err + } + default: + return []byte{}, errors.New("Unrecognized key type") + + } + return b.Bytes(), nil +} + +func writeCertificates(f io.Writer, certs ...*x509.Certificate) error { + bytes, err := EncodeCertificates(certs...) 
+ if err != nil { + return err + } + if _, err := f.Write(bytes); err != nil { + return err + } + + return nil +} +func writeKeyFile(f io.Writer, key crypto.PrivateKey) error { + bytes, err := encodeKey(key) + if err != nil { + return err + } + if _, err := f.Write(bytes); err != nil { + return err + } + + return nil +} + +func stringsNotInSlice(needles []string, haystack []string) []string { + missing := []string{} + for _, needle := range needles { + if !stringInSlice(needle, haystack) { + missing = append(missing, needle) + } + } + return missing +} + +func stringInSlice(needle string, haystack []string) bool { + for _, straw := range haystack { + if needle == straw { + return true + } + } + return false +} + +func ipsNotInSlice(needles []net.IP, haystack []net.IP) []net.IP { + missing := []net.IP{} + for _, needle := range needles { + if !ipInSlice(needle, haystack) { + missing = append(missing, needle) + } + } + return missing +} + +func ipInSlice(needle net.IP, haystack []net.IP) bool { + for _, straw := range haystack { + if needle.Equal(straw) { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go new file mode 100644 index 000000000..0aa127037 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go @@ -0,0 +1,20 @@ +package crypto + +import ( + "crypto/x509" + "time" +) + +// FilterExpiredCerts checks are all certificates in the bundle valid, i.e. they have not expired. +// The function returns new bundle with only valid certificates or error if no valid certificate is found. +func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate { + currentTime := time.Now() + var validCerts []*x509.Certificate + for _, c := range certs { + if c.NotAfter.After(currentTime) { + validCerts = append(validCerts, c) + } + } + + return validCerts +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go new file mode 100644 index 000000000..1a522609a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go @@ -0,0 +1,72 @@ +package condition + +const ( + // ManagementStateDegradedConditionType is true when the operator ManagementState is not "Managed".. + // Possible reasons are Unmanaged, Removed or Unknown. Any of these cases means the operator is not actively managing the operand. + // This condition is set to false when the ManagementState is set to back to "Managed". + ManagementStateDegradedConditionType = "ManagementStateDegraded" + + // UnsupportedConfigOverridesUpgradeableConditionType is true when operator unsupported config overrides is changed. + // When NoUnsupportedConfigOverrides reason is given it means there are no unsupported config overrides. + // When UnsupportedConfigOverridesSet reason is given it means the unsupported config overrides are set, which might impact the ability + // of operator to successfully upgrade its operand. + UnsupportedConfigOverridesUpgradeableConditionType = "UnsupportedConfigOverridesUpgradeable" + + // MonitoringResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the ServiceMonitor + // CR resource, which is required by monitoring operator to collect Prometheus data from the operator. 
When this condition is true and the ServiceMonitor
+	// is already created, it won't have an impact on collecting metrics. However, if the ServiceMonitor was not created, the metrics won't be available for
+	// collection until this condition is set to false.
+	// The condition is set to false automatically when the operator successfully synchronizes the ServiceMonitor resource.
+	MonitoringResourceControllerDegradedConditionType = "MonitoringResourceControllerDegraded"
+
+	// BackingResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the resources needed
+	// to successfully run the installer pods (installer CRB and SA). If these were already created, this condition is not fatal; however, if the resources
+	// were not created, installer pod creation will fail.
+	// This condition is set to false when the operator can successfully synchronize installer SA and CRB.
+	BackingResourceControllerDegradedConditionType = "BackingResourceControllerDegraded"
+
+	// StaticPodsDegradedConditionType is true when the operator observes errors when installing the new revision static pods.
+	// This condition reports the Error reason when the pods are terminated, not ready, or waiting, during which the operand's quality of service is degraded.
+	// This condition is set to False when the pods change state to running and are observed ready.
+	StaticPodsDegradedConditionType = "StaticPodsDegraded"
+
+	// StaticPodsAvailableConditionType is true when the static pod is available on at least one node.
+	StaticPodsAvailableConditionType = "StaticPodsAvailable"
+
+	// ConfigObservationDegradedConditionType is true when the operator failed to observe or process a configuration change.
+	// This is not a transient condition and normally a correction or manual intervention is required on the config custom resource.
+	ConfigObservationDegradedConditionType = "ConfigObservationDegraded"
+
+	// ResourceSyncControllerDegradedConditionType is true when the operator failed to synchronize one or more secrets or config maps required
+	// to run the operand. The operand's ability to provide service might be affected by this condition.
+	// This condition is set to false when the operator is able to create secrets and config maps.
+	ResourceSyncControllerDegradedConditionType = "ResourceSyncControllerDegraded"
+
+	// CertRotationDegradedConditionTypeFmt is true when the operator failed to properly rotate one or more certificates required by the operand.
+	// The RotationError reason is given with a message describing the details of this failure. This condition can be fatal when ignored, as the existing certificate(s)
+	// validity can expire and, without rotating/renewing them, manual recovery might be required to fix the cluster.
+	CertRotationDegradedConditionTypeFmt = "CertRotation_%s_Degraded"
+
+	// InstallerControllerDegradedConditionType is true when the operator is not able to create new installer pods so the new revisions
+	// cannot be rolled out. This might happen when one or more required secrets or config maps do not exist.
+	// Once the missing secret or config map is available, this condition is automatically set to false.
+	InstallerControllerDegradedConditionType = "InstallerControllerDegraded"
+
+	// NodeInstallerDegradedConditionType is true when the operator is not able to create new installer pods because there are no schedulable nodes
+	// available to run the installer pods.
+ // The AllNodesAtLatestRevision reason is set when all master nodes are updated to the latest revision. It is false when some masters are pending revision. + // ZeroNodesActive reason is set to True when no active master nodes are observed. Is set to False when there is at least one active master node. + NodeInstallerDegradedConditionType = "NodeInstallerDegraded" + + // NodeInstallerProgressingConditionType is true when the operator is moving nodes to a new revision. + NodeInstallerProgressingConditionType = "NodeInstallerProgressing" + + // RevisionControllerDegradedConditionType is true when the operator is not able to create new desired revision because an error occurred when + // the operator attempted to created required resource(s) (secrets, configmaps, ...). + // This condition mean no new revision will be created. + RevisionControllerDegradedConditionType = "RevisionControllerDegraded" + + // NodeControllerDegradedConditionType is true when the operator observed a master node that is not ready. + // Note that a node is not ready when its Condition.NodeReady wasn't set to true + NodeControllerDegradedConditionType = "NodeControllerDegraded" +) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go new file mode 100644 index 000000000..3b9f61180 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -0,0 +1,284 @@ +package configobserver + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/imdario/mergo" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/tools/cache" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type +type Listers interface { + // ResourceSyncer can be used to copy content from one namespace to another + ResourceSyncer() resourcesynccontroller.ResourceSyncer + PreRunHasSynced() []cache.InformerSynced +} + +// ObserveConfigFunc observes configuration and returns the observedConfig. This function should not return an +// observedConfig that would cause the service being managed by the operator to crash. For example, if a required +// configuration key cannot be observed, consider reusing the configuration key's previous value. Errors that occur +// while attempting to generate the observedConfig should be returned in the errs slice. +type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) + +type ConfigObserver struct { + // observers are called in an undefined order and their results are merged to + // determine the observed configuration. 
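+	// The sync loop below merges the observer results in both forward and reverse order and reports an
+	// error when the two merges differ, so individual observers must not rely on any particular ordering.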
+ observers []ObserveConfigFunc + + operatorClient v1helpers.OperatorClient + + // listers are used by config observers to retrieve necessary resources + listers Listers + + nestedConfigPath []string + degradedConditionType string +} + +func NewConfigObserver( + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, + listers Listers, + informers []factory.Informer, + observers ...ObserveConfigFunc, +) factory.Controller { + return NewNestedConfigObserver( + operatorClient, + eventRecorder, + listers, + informers, + nil, + "", + observers..., + ) +} + +// NewNestedConfigObserver creates a config observer that watches changes to a nested field (nestedConfigPath) in the config. +// Useful when the config is shared across multiple controllers in the same process. +// +// Example: +// +// Given the following configuration, you could run two separate controllers and point each to its own section. +// The first controller would be responsible for "oauthAPIServer" and the second for "oauthServer" section. +// +// "observedConfig": { +// "oauthAPIServer": { +// "apiServerArguments": {"tls-min-version": "VersionTLS12"} +// }, +// "oauthServer": { +// "corsAllowedOrigins": [ "//127\\.0\\.0\\.1(:|$)","//localhost(:|$)"] +// } +// } +// +// oauthAPIController := NewNestedConfigObserver(..., []string{"oauthAPIServer"} +// oauthServerController := NewNestedConfigObserver(..., []string{"oauthServer"} +func NewNestedConfigObserver( + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, + listers Listers, + informers []factory.Informer, + nestedConfigPath []string, + degradedConditionPrefix string, + observers ...ObserveConfigFunc, +) factory.Controller { + c := &ConfigObserver{ + operatorClient: operatorClient, + observers: observers, + listers: listers, + nestedConfigPath: nestedConfigPath, + degradedConditionType: degradedConditionPrefix + condition.ConfigObservationDegradedConditionType, + } + + return factory.New().ResyncEvery(time.Minute).WithSync(c.sync).WithInformers(append(informers, listersToInformer(listers)...)...).ToController("ConfigObserver", eventRecorder.WithComponentSuffix("config-observer")) +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. +func (c ConfigObserver) sync(ctx context.Context, syncCtx factory.SyncContext) error { + originalSpec, _, _, err := c.operatorClient.GetOperatorState() + if management.IsOperatorRemovable() && apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + spec := originalSpec.DeepCopy() + + // don't worry about errors. If we can't decode, we'll simply stomp over the field. + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(spec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil { + klog.V(4).Infof("decode of existing config failed with error: %v", err) + } + + var errs []error + var observedConfigs []map[string]interface{} + for _, i := range rand.Perm(len(c.observers)) { + var currErrs []error + observedConfig, currErrs := c.observers[i](c.listers, syncCtx.Recorder(), existingConfig) + observedConfigs = append(observedConfigs, observedConfig) + errs = append(errs, currErrs...) 
+ } + + mergedObservedConfig := map[string]interface{}{} + for _, observedConfig := range observedConfigs { + if err := mergo.Merge(&mergedObservedConfig, observedConfig); err != nil { + klog.Warningf("merging observed config failed: %v", err) + } + } + + reverseMergedObservedConfig := map[string]interface{}{} + for i := len(observedConfigs) - 1; i >= 0; i-- { + if err := mergo.Merge(&reverseMergedObservedConfig, observedConfigs[i]); err != nil { + klog.Warningf("merging observed config failed: %v", err) + } + } + + if !equality.Semantic.DeepEqual(mergedObservedConfig, reverseMergedObservedConfig) { + errs = append(errs, errors.New("non-deterministic config observation detected")) + } + + if err := c.updateObservedConfig(ctx, syncCtx, existingConfig, mergedObservedConfig); err != nil { + errs = []error{err} + } + configError := v1helpers.NewMultiLineAggregate(errs) + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: c.degradedConditionType, + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +func (c ConfigObserver) updateObservedConfig(ctx context.Context, syncCtx factory.SyncContext, existingConfig map[string]interface{}, mergedObservedConfig map[string]interface{}) error { + if len(c.nestedConfigPath) == 0 { + if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) { + syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig)) + return c.updateConfig(ctx, syncCtx, mergedObservedConfig, v1helpers.UpdateObservedConfigFn) + } + return nil + } + + existingConfigNested, _, err := unstructured.NestedMap(existingConfig, c.nestedConfigPath...) + if err != nil { + return fmt.Errorf("unable to extract the config under %v key, err %v", c.nestedConfigPath, err) + } + mergedObservedConfigNested, _, err := unstructured.NestedMap(mergedObservedConfig, c.nestedConfigPath...) + if err != nil { + return fmt.Errorf("unable to extract the merged config under %v, err %v", c.nestedConfigPath, err) + } + if !equality.Semantic.DeepEqual(existingConfigNested, mergedObservedConfigNested) { + syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated section (%q) of observed config: %q", strings.Join(c.nestedConfigPath, "/"), diff.ObjectDiff(existingConfigNested, mergedObservedConfigNested)) + return c.updateConfig(ctx, syncCtx, mergedObservedConfigNested, c.updateNestedConfigHelper) + } + return nil +} + +type updateObservedConfigFn func(config map[string]interface{}) v1helpers.UpdateOperatorSpecFunc + +func (c ConfigObserver) updateConfig(ctx context.Context, syncCtx factory.SyncContext, updatedMaybeNestedConfig map[string]interface{}, updateConfigHelper updateObservedConfigFn) error { + if _, _, err := v1helpers.UpdateSpec(ctx, c.operatorClient, updateConfigHelper(updatedMaybeNestedConfig)); err != nil { + // At this point we failed to write the updated config. If we are permanently broken, do not pile the errors from observers + // but instead reset the errors and only report single error condition. 
+ syncCtx.Recorder().Warningf("ObservedConfigWriteError", "Failed to write observed config: %v", err) + return fmt.Errorf("error writing updated observed config: %v", err) + } + return nil +} + +// updateNestedConfigHelper returns a helper function for updating the nested config. +func (c ConfigObserver) updateNestedConfigHelper(updatedNestedConfig map[string]interface{}) v1helpers.UpdateOperatorSpecFunc { + return func(currentSpec *operatorv1.OperatorSpec) error { + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(currentSpec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil { + klog.V(4).Infof("decode of existing config failed with error: %v", err) + } + if err := unstructured.SetNestedField(existingConfig, updatedNestedConfig, c.nestedConfigPath...); err != nil { + return fmt.Errorf("unable to set the nested (%q) observed config: %v", strings.Join(c.nestedConfigPath, "/"), err) + } + currentSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: existingConfig}} + return nil + } +} + +// listersToInformer converts the Listers interface to informer with empty AddEventHandler as we only care about synced caches in the Run. +func listersToInformer(l Listers) []factory.Informer { + result := make([]factory.Informer, len(l.PreRunHasSynced())) + for i := range l.PreRunHasSynced() { + result[i] = &listerInformer{cacheSynced: l.PreRunHasSynced()[i]} + } + return result +} + +type listerInformer struct { + cacheSynced cache.InformerSynced +} + +func (l *listerInformer) AddEventHandler(cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { + return nil, nil +} + +func (l *listerInformer) HasSynced() bool { + return l.cacheSynced() +} + +// WithPrefix adds a prefix to the path the input observer would otherwise observe into +func WithPrefix(observer ObserveConfigFunc, prefix ...string) ObserveConfigFunc { + if len(prefix) == 0 { + return observer + } + + return func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + errs := []error{} + + nestedExistingConfig, _, err := unstructured.NestedMap(existingConfig, prefix...) + if err != nil { + errs = append(errs, err) + } + + orig, observerErrs := observer(listers, recorder, nestedExistingConfig) + errs = append(errs, observerErrs...) + + if orig == nil { + return nil, errs + } + + ret := map[string]interface{}{} + if err := unstructured.SetNestedField(ret, orig, prefix...); err != nil { + errs = append(errs, err) + } + return ret, errs + + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go new file mode 100644 index 000000000..5ff0f3af0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go @@ -0,0 +1,47 @@ +package featuregates + +import ( + "fmt" + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// FeatureGate indicates whether a given feature is enabled or not +// This interface is heavily influenced by k8s.io/component-base, but not exactly compatible. +type FeatureGate interface { + // Enabled returns true if the key is enabled. + Enabled(key configv1.FeatureGateName) bool + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. 
+ KnownFeatures() []configv1.FeatureGateName +} + +type featureGate struct { + enabled sets.Set[configv1.FeatureGateName] + disabled sets.Set[configv1.FeatureGateName] +} + +func NewFeatureGate(enabled, disabled []configv1.FeatureGateName) FeatureGate { + return &featureGate{ + enabled: sets.New[configv1.FeatureGateName](enabled...), + disabled: sets.New[configv1.FeatureGateName](disabled...), + } +} + +func (f *featureGate) Enabled(key configv1.FeatureGateName) bool { + if f.enabled.Has(key) { + return true + } + if f.disabled.Has(key) { + return false + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGates %v", key, f.KnownFeatures())) +} + +func (f *featureGate) KnownFeatures() []configv1.FeatureGateName { + allKnown := sets.NewString() + allKnown.Insert(FeatureGateNamesToStrings(f.enabled.UnsortedList())...) + allKnown.Insert(FeatureGateNamesToStrings(f.disabled.UnsortedList())...) + + return StringsToFeatureGateNames(allKnown.List()) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go new file mode 100644 index 000000000..58ae71763 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go @@ -0,0 +1,78 @@ +package featuregates + +import ( + "context" + "fmt" + + configv1 "github.com/openshift/api/config/v1" +) + +type hardcodedFeatureGateAccess struct { + enabled []configv1.FeatureGateName + disabled []configv1.FeatureGateName + readErr error + + initialFeatureGatesObserved chan struct{} +} + +// NewHardcodedFeatureGateAccess returns a FeatureGateAccess that is always initialized and always +// returns the provided feature gates. +func NewHardcodedFeatureGateAccess(enabled, disabled []configv1.FeatureGateName) FeatureGateAccess { + initialFeatureGatesObserved := make(chan struct{}) + close(initialFeatureGatesObserved) + c := &hardcodedFeatureGateAccess{ + enabled: enabled, + disabled: disabled, + initialFeatureGatesObserved: initialFeatureGatesObserved, + } + + return c +} + +// NewHardcodedFeatureGateAccessForTesting returns a FeatureGateAccess that returns stub responses +// using caller-supplied values. +func NewHardcodedFeatureGateAccessForTesting(enabled, disabled []configv1.FeatureGateName, initialFeatureGatesObserved chan struct{}, readErr error) FeatureGateAccess { + return &hardcodedFeatureGateAccess{ + enabled: enabled, + disabled: disabled, + initialFeatureGatesObserved: initialFeatureGatesObserved, + readErr: readErr, + } +} + +func (c *hardcodedFeatureGateAccess) SetChangeHandler(featureGateChangeHandlerFn FeatureGateChangeHandlerFunc) { + // ignore +} + +func (c *hardcodedFeatureGateAccess) Run(ctx context.Context) { + // ignore +} + +func (c *hardcodedFeatureGateAccess) InitialFeatureGatesObserved() <-chan struct{} { + return c.initialFeatureGatesObserved +} + +func (c *hardcodedFeatureGateAccess) AreInitialFeatureGatesObserved() bool { + select { + case <-c.InitialFeatureGatesObserved(): + return true + default: + return false + } +} + +func (c *hardcodedFeatureGateAccess) CurrentFeatureGates() (FeatureGate, error) { + return NewFeatureGate(c.enabled, c.disabled), c.readErr +} + +// NewHardcodedFeatureGateAccessFromFeatureGate returns a FeatureGateAccess that is static and initialised from +// a populated FeatureGate status. +// If the desired version is missing, this will return an error. 
+func NewHardcodedFeatureGateAccessFromFeatureGate(featureGate *configv1.FeatureGate, desiredVersion string) (FeatureGateAccess, error) { + features, err := featuresFromFeatureGate(featureGate, desiredVersion) + if err != nil { + return nil, fmt.Errorf("unable to determine features: %w", err) + } + + return NewHardcodedFeatureGateAccess(features.Enabled, features.Disabled), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go new file mode 100644 index 000000000..0f2cb85fd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go @@ -0,0 +1,118 @@ +package featuregates + +import ( + "fmt" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +// NewObserveFeatureFlagsFunc produces a configobserver for feature gates. If non-nil, the featureWhitelist filters +// feature gates to a known subset (instead of everything). The featureBlacklist will stop certain features from making +// it through the list. The featureBlacklist should be empty, but for a brief time, some featuregates may need to skipped. +// @smarterclayton will live forever in shame for being the first to require this for "IPv6DualStack". +func NewObserveFeatureFlagsFunc(featureWhitelist sets.Set[configv1.FeatureGateName], featureBlacklist sets.Set[configv1.FeatureGateName], configPath []string, featureGateAccess FeatureGateAccess) configobserver.ObserveConfigFunc { + return (&featureFlags{ + allowAll: len(featureWhitelist) == 0, + featureWhitelist: featureWhitelist, + featureBlacklist: featureBlacklist, + configPath: configPath, + featureGateAccess: featureGateAccess, + }).ObserveFeatureFlags +} + +type featureFlags struct { + allowAll bool + featureWhitelist sets.Set[configv1.FeatureGateName] + // we add a forceDisableFeature list because we've now had bad featuregates break individual operators. Awesome. + featureBlacklist sets.Set[configv1.FeatureGateName] + configPath []string + featureGateAccess FeatureGateAccess +} + +// ObserveFeatureFlags fills in --feature-flags for the kube-apiserver +func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + prunedExistingConfig := configobserver.Pruned(existingConfig, f.configPath) + + errs := []error{} + + if !f.featureGateAccess.AreInitialFeatureGatesObserved() { + // if we haven't observed featuregates yet, return the existing + return prunedExistingConfig, nil + } + + featureGates, err := f.featureGateAccess.CurrentFeatureGates() + if err != nil { + return prunedExistingConfig, append(errs, err) + } + observedConfig := map[string]interface{}{} + newConfigValue := f.getWhitelistedFeatureNames(featureGates) + + currentConfigValue, _, err := unstructured.NestedStringSlice(existingConfig, f.configPath...) 
+ if err != nil { + errs = append(errs, err) + // keep going on read error from existing config + } + if !reflect.DeepEqual(currentConfigValue, newConfigValue) { + recorder.Eventf("ObserveFeatureFlagsUpdated", "Updated %v to %s", strings.Join(f.configPath, "."), strings.Join(newConfigValue, ",")) + } + + if err := unstructured.SetNestedStringSlice(observedConfig, newConfigValue, f.configPath...); err != nil { + recorder.Warningf("ObserveFeatureFlags", "Failed setting %v: %v", strings.Join(f.configPath, "."), err) + return prunedExistingConfig, append(errs, err) + } + + return configobserver.Pruned(observedConfig, f.configPath), errs +} + +func (f *featureFlags) getWhitelistedFeatureNames(featureGates FeatureGate) []string { + newConfigValue := []string{} + formatEnabledFunc := func(fs configv1.FeatureGateName) string { + return fmt.Sprintf("%v=true", fs) + } + formatDisabledFunc := func(fs configv1.FeatureGateName) string { + return fmt.Sprintf("%v=false", fs) + } + + for _, knownFeatureGate := range featureGates.KnownFeatures() { + if f.featureBlacklist.Has(knownFeatureGate) { + continue + } + // only add whitelisted feature flags + if !f.allowAll && !f.featureWhitelist.Has(knownFeatureGate) { + continue + } + + if featureGates.Enabled(knownFeatureGate) { + newConfigValue = append(newConfigValue, formatEnabledFunc(knownFeatureGate)) + } else { + newConfigValue = append(newConfigValue, formatDisabledFunc(knownFeatureGate)) + } + } + + return newConfigValue +} + +func StringsToFeatureGateNames(in []string) []configv1.FeatureGateName { + out := []configv1.FeatureGateName{} + for _, curr := range in { + out = append(out, configv1.FeatureGateName(curr)) + } + + return out +} + +func FeatureGateNamesToStrings(in []configv1.FeatureGateName) []string { + out := []string{} + for _, curr := range in { + out = append(out, string(curr)) + } + + return out +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go new file mode 100644 index 000000000..4b2caccd6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go @@ -0,0 +1,318 @@ +package featuregates + +import ( + "context" + "fmt" + "os" + "reflect" + "sync" + "time" + + configv1 "github.com/openshift/api/config/v1" + + v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +type FeatureGateChangeHandlerFunc func(featureChange FeatureChange) + +// FeatureGateAccess is used to get a list of enabled and disabled featuregates. +// Create a new instance using NewFeatureGateAccess. +// To create one for unit testing, use NewHardcodedFeatureGateAccess. +type FeatureGateAccess interface { + // SetChangeHandler can only be called before Run. + // The default change handler will exit 0 when the set of featuregates changes. + // That is usually the easiest and simplest thing for an *operator* to do. + // This also discourages direct operand reading since all operands restarting simultaneously is bad. 
+ // This function allows changing that default behavior to something else (perhaps a channel notification for + // all impacted controllers in an operator. + // I doubt this will be worth the effort in the majority of cases. + SetChangeHandler(featureGateChangeHandlerFn FeatureGateChangeHandlerFunc) + + // Run starts a go func that continously watches the set of featuregates enabled in the cluster. + Run(ctx context.Context) + // InitialFeatureGatesObserved returns a channel that is closed once the featuregates have + // been observed. Once closed, the CurrentFeatureGates method will return the current set of + // featuregates and will never return a non-nil error. + InitialFeatureGatesObserved() <-chan struct{} + // CurrentFeatureGates returns the list of enabled and disabled featuregates. + // It returns an error if the current set of featuregates is not known. + CurrentFeatureGates() (FeatureGate, error) + // AreInitialFeatureGatesObserved returns true if the initial featuregates have been observed. + AreInitialFeatureGatesObserved() bool +} + +type Features struct { + Enabled []configv1.FeatureGateName + Disabled []configv1.FeatureGateName +} + +type FeatureChange struct { + Previous *Features + New Features +} + +type defaultFeatureGateAccess struct { + desiredVersion string + missingVersionMarker string + clusterVersionLister configlistersv1.ClusterVersionLister + featureGateLister configlistersv1.FeatureGateLister + initialFeatureGatesObserved chan struct{} + + featureGateChangeHandlerFn FeatureGateChangeHandlerFunc + + lock sync.Mutex + started bool + initialFeatures Features + currentFeatures Features + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewFeatureGateAccess returns a controller that keeps the list of enabled/disabled featuregates up to date. +// desiredVersion is the version of this operator that would be set on the clusteroperator.status.versions. +// missingVersionMarker is the stub version provided by the operator. If that is also the desired version, +// then the most either the desired clusterVersion or most recent version will be used. +// clusterVersionInformer is used when desiredVersion and missingVersionMarker are the same to derive the "best" version +// of featuregates to use. +// featureGateInformer is used to track changes to the featureGates once they are initially set. +// By default, when the enabled/disabled list of featuregates changes, os.Exit is called. This behavior can be +// overridden by calling SetChangeHandler to whatever you wish the behavior to be. +// A common construct is: +/* go +featureGateAccessor := NewFeatureGateAccess(args) +go featureGateAccessor.Run(ctx) + +select{ +case <- featureGateAccessor.InitialFeatureGatesObserved(): + featureGates, _ := featureGateAccessor.CurrentFeatureGates() + klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) +case <- time.After(1*time.Minute): + klog.Errorf("timed out waiting for FeatureGate detection") + return fmt.Errorf("timed out waiting for FeatureGate detection") +} + +// whatever other initialization you have to do, at this point you have FeatureGates to drive your behavior. +*/ +// That construct is easy. It is better to use the .spec.observedConfiguration construct common in library-go operators +// to avoid gating your general startup on FeatureGate determination, but if you haven't already got that mechanism +// this construct is easy. 
+func NewFeatureGateAccess( + desiredVersion, missingVersionMarker string, + clusterVersionInformer v1.ClusterVersionInformer, + featureGateInformer v1.FeatureGateInformer, + eventRecorder events.Recorder) FeatureGateAccess { + c := &defaultFeatureGateAccess{ + desiredVersion: desiredVersion, + missingVersionMarker: missingVersionMarker, + clusterVersionLister: clusterVersionInformer.Lister(), + featureGateLister: featureGateInformer.Lister(), + initialFeatureGatesObserved: make(chan struct{}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "feature-gate-detector"), + eventRecorder: eventRecorder, + } + c.SetChangeHandler(ForceExit) + + // we aren't expecting many + clusterVersionInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.queue.Add("cluster") + }, + UpdateFunc: func(old, cur interface{}) { + c.queue.Add("cluster") + }, + DeleteFunc: func(uncast interface{}) { + c.queue.Add("cluster") + }, + }) + featureGateInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.queue.Add("cluster") + }, + UpdateFunc: func(old, cur interface{}) { + c.queue.Add("cluster") + }, + DeleteFunc: func(uncast interface{}) { + c.queue.Add("cluster") + }, + }) + + return c +} + +func ForceExit(featureChange FeatureChange) { + if featureChange.Previous != nil { + os.Exit(0) + } +} + +func (c *defaultFeatureGateAccess) SetChangeHandler(featureGateChangeHandlerFn FeatureGateChangeHandlerFunc) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.started { + panic("programmer error, cannot update the change handler after starting") + } + c.featureGateChangeHandlerFn = featureGateChangeHandlerFn +} + +func (c *defaultFeatureGateAccess) Run(ctx context.Context) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting feature-gate-detector") + defer klog.Infof("Shutting down feature-gate-detector") + + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *defaultFeatureGateAccess) syncHandler(ctx context.Context) error { + desiredVersion := c.desiredVersion + if c.missingVersionMarker == c.desiredVersion { + clusterVersion, err := c.clusterVersionLister.Get("version") + if apierrors.IsNotFound(err) { + return nil // we will be re-triggered when it is created + } + if err != nil { + return err + } + + desiredVersion = clusterVersion.Status.Desired.Version + if len(desiredVersion) == 0 && len(clusterVersion.Status.History) > 0 { + desiredVersion = clusterVersion.Status.History[0].Version + } + } + + featureGate, err := c.featureGateLister.Get("cluster") + if apierrors.IsNotFound(err) { + return nil // we will be re-triggered when it is created + } + if err != nil { + return err + } + + features, err := featuresFromFeatureGate(featureGate, desiredVersion) + if err != nil { + return fmt.Errorf("unable to determine features: %w", err) + } + + c.setFeatureGates(features) + + return nil +} + +func (c *defaultFeatureGateAccess) setFeatureGates(features Features) { + c.lock.Lock() + defer c.lock.Unlock() + + var previousFeatures *Features + if c.AreInitialFeatureGatesObserved() { + t := c.currentFeatures + previousFeatures = &t + } + + c.currentFeatures = features + + if !c.AreInitialFeatureGatesObserved() { + c.initialFeatures = features + close(c.initialFeatureGatesObserved) + c.eventRecorder.Eventf("FeatureGatesInitialized", "FeatureGates updated to %#v", c.currentFeatures) + } + + if previousFeatures == nil || 
!reflect.DeepEqual(*previousFeatures, c.currentFeatures) { + if previousFeatures != nil { + c.eventRecorder.Eventf("FeatureGatesModified", "FeatureGates updated to %#v", c.currentFeatures) + } + + c.featureGateChangeHandlerFn(FeatureChange{ + Previous: previousFeatures, + New: c.currentFeatures, + }) + } +} + +func (c *defaultFeatureGateAccess) InitialFeatureGatesObserved() <-chan struct{} { + return c.initialFeatureGatesObserved +} + +func (c *defaultFeatureGateAccess) AreInitialFeatureGatesObserved() bool { + select { + case <-c.InitialFeatureGatesObserved(): + return true + default: + return false + } +} + +func (c *defaultFeatureGateAccess) CurrentFeatureGates() (FeatureGate, error) { + c.lock.Lock() + defer c.lock.Unlock() + + if !c.AreInitialFeatureGatesObserved() { + return nil, fmt.Errorf("featureGates not yet observed") + } + retEnabled := make([]configv1.FeatureGateName, len(c.currentFeatures.Enabled)) + retDisabled := make([]configv1.FeatureGateName, len(c.currentFeatures.Disabled)) + copy(retEnabled, c.currentFeatures.Enabled) + copy(retDisabled, c.currentFeatures.Disabled) + + return NewFeatureGate(retEnabled, retDisabled), nil +} + +func (c *defaultFeatureGateAccess) runWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *defaultFeatureGateAccess) processNextWorkItem(ctx context.Context) bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.syncHandler(ctx) + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func featuresFromFeatureGate(featureGate *configv1.FeatureGate, desiredVersion string) (Features, error) { + found := false + features := Features{} + for _, featureGateValues := range featureGate.Status.FeatureGates { + if featureGateValues.Version != desiredVersion { + continue + } + found = true + for _, enabled := range featureGateValues.Enabled { + features.Enabled = append(features.Enabled, enabled.Name) + } + for _, disabled := range featureGateValues.Disabled { + features.Disabled = append(features.Disabled, disabled.Name) + } + break + } + + if !found { + return Features{}, fmt.Errorf("missing desired version %q in featuregates.config.openshift.io/cluster", desiredVersion) + } + + return features, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go new file mode 100644 index 000000000..27b92d0fa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go @@ -0,0 +1,45 @@ +package configobserver + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// Pruned returns the unstructured filtered by the given paths, i.e. everything +// outside of them will be dropped. The returned data structure might overlap +// with the input, but the input is not mutated. In case of error for a path, +// that path is dropped. +func Pruned(obj map[string]interface{}, pths ...[]string) map[string]interface{} { + if obj == nil || len(pths) == 0 { + return obj + } + + ret := map[string]interface{}{} + if len(pths) == 1 { + x, found, err := unstructured.NestedFieldCopy(obj, pths[0]...) + if err != nil || !found { + return ret + } + unstructured.SetNestedField(ret, x, pths[0]...) 
+ return ret + } + + for i, p := range pths { + x, found, err := unstructured.NestedFieldCopy(obj, p...) + if err != nil { + continue + } + if !found { + continue + } + if i < len(pths)-1 { + // this might be overwritten by a later path + x = runtime.DeepCopyJSONValue(x) + } + if err := unstructured.SetNestedField(ret, x, p...); err != nil { + continue + } + } + + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go new file mode 100644 index 000000000..294770f3e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go @@ -0,0 +1,77 @@ +package management + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +var ( + allowOperatorUnmanagedState = true + allowOperatorRemovedState = true +) + +// SetOperatorAlwaysManaged is one time choice when an operator want to opt-out from supporting the "unmanaged" state. +// This is a case of control plane operators or operators that are required to always run otherwise the cluster will +// get into unstable state or critical components will stop working. +func SetOperatorAlwaysManaged() { + allowOperatorUnmanagedState = false +} + +// SetOperatorUnmanageable is one time choice when an operator wants to support the "unmanaged" state. +// This is the default setting, provided here mostly for unit tests. +func SetOperatorUnmanageable() { + allowOperatorUnmanagedState = true +} + +// SetOperatorNotRemovable is one time choice the operator author can make to indicate the operator does not support +// removing of his operand. This makes sense for operators like kube-apiserver where removing operand will lead to a +// bricked, non-automatically recoverable state. +func SetOperatorNotRemovable() { + allowOperatorRemovedState = false +} + +// SetOperatorRemovable is one time choice the operator author can make to indicate the operator supports +// removing of his operand. +// This is the default setting, provided here mostly for unit tests. +func SetOperatorRemovable() { + allowOperatorRemovedState = true +} + +// IsOperatorAlwaysManaged means the operator can't be set to unmanaged state. +func IsOperatorAlwaysManaged() bool { + return !allowOperatorUnmanagedState +} + +// IsOperatorNotRemovable means the operator can't be set to removed state. +func IsOperatorNotRemovable() bool { + return !allowOperatorRemovedState +} + +// IsOperatorRemovable means the operator can be set to removed state. +func IsOperatorRemovable() bool { + return allowOperatorRemovedState +} + +func IsOperatorUnknownState(state v1.ManagementState) bool { + switch state { + case v1.Managed, v1.Removed, v1.Unmanaged: + return false + default: + return true + } +} + +// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand. 
+func IsOperatorManaged(state v1.ManagementState) bool { + if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() { + return true + } + switch state { + case v1.Managed: + return true + case v1.Removed: + return false + case v1.Unmanaged: + return false + } + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go index f9bbf0f1a..9bf7c38c5 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go @@ -18,6 +18,8 @@ import ( const ( // Label on the CSIDriver to declare the driver's effective pod security profile csiInlineVolProfileLabel = "security.openshift.io/csi-ephemeral-volume-profile" + + defaultScAnnotationKey = "storageclass.kubernetes.io/is-default-class" ) var ( @@ -42,6 +44,22 @@ func ApplyStorageClass(ctx context.Context, client storageclientv1.StorageClasse return nil, false, err } + if required.ObjectMeta.ResourceVersion != "" && required.ObjectMeta.ResourceVersion != existing.ObjectMeta.ResourceVersion { + err = fmt.Errorf("rejected to update StorageClass %s because the object has been modified: desired/actual ResourceVersion: %v/%v", + required.Name, required.ObjectMeta.ResourceVersion, existing.ObjectMeta.ResourceVersion) + return nil, false, err + } + // Our caller may not be able to set required.ObjectMeta.ResourceVersion. We only want to overwrite value of + // default storage class annotation if it is missing in existing.Annotations + if existing.Annotations != nil { + if _, ok := existing.Annotations[defaultScAnnotationKey]; ok { + if required.Annotations == nil { + required.Annotations = make(map[string]string) + } + required.Annotations[defaultScAnnotationKey] = existing.Annotations[defaultScAnnotationKey] + } + } + // First, let's compare ObjectMeta from both objects modified := resourcemerge.BoolPtr(false) existingCopy := existing.DeepCopy() diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go new file mode 100644 index 000000000..f5a26338b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go @@ -0,0 +1,67 @@ +package resourcesynccontroller + +import ( + "crypto/x509" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/cert" + + "github.com/openshift/library-go/pkg/crypto" +) + +func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) { + certificates := []*x509.Certificate{} + for _, input := range inputConfigMaps { + inputConfigMap, err := lister.ConfigMaps(input.Namespace).Get(input.Name) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + return nil, err + } + + // configmaps must conform to this + inputContent := inputConfigMap.Data["ca-bundle.crt"] + if len(inputContent) == 0 { + continue + } + inputCerts, err := cert.ParseCertsPEM([]byte(inputContent)) + if err != nil { + return nil, fmt.Errorf("configmap/%s in %q is malformed: %v", input.Name, input.Namespace, err) + } + certificates = append(certificates, 
inputCerts...) + } + + certificates = crypto.FilterExpiredCerts(certificates...) + finalCertificates := []*x509.Certificate{} + // now check for duplicates. n^2, but super simple + for i := range certificates { + found := false + for j := range finalCertificates { + if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { + found = true + break + } + } + if !found { + finalCertificates = append(finalCertificates, certificates[i]) + } + } + + caBytes, err := crypto.EncodeCertificates(finalCertificates...) + if err != nil { + return nil, err + } + + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: destinationConfigMap.Namespace, Name: destinationConfigMap.Name}, + Data: map[string]string{ + "ca-bundle.crt": string(caBytes), + }, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go new file mode 100644 index 000000000..c53af8bdf --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go @@ -0,0 +1,41 @@ +package resourcesynccontroller + +import "k8s.io/apimachinery/pkg/util/sets" + +// ResourceLocation describes coordinates for a resource to be synced +type ResourceLocation struct { + Namespace string `json:"namespace"` + Name string `json:"name"` + + // Provider if set for the source location enhance the error message to point to the component which + // provide this resource. + Provider string `json:"provider,omitempty"` +} + +// PreconditionsFulfilled is a function that indicates whether all prerequisites +// are met and a resource can be synced. +type preconditionsFulfilled func() (bool, error) + +func alwaysFulfilledPreconditions() (bool, error) { return true, nil } + +type syncRuleSource struct { + ResourceLocation + syncedKeys sets.String // defines the set of keys to sync from source to dest + preconditionsFulfilledFn preconditionsFulfilled // preconditions to fulfill before syncing the resource +} + +type syncRules map[ResourceLocation]syncRuleSource + +var ( + emptyResourceLocation = ResourceLocation{} +) + +// ResourceSyncer allows changes to syncing rules by this controller +type ResourceSyncer interface { + // SyncConfigMap indicates that a configmap should be copied from the source to the destination. It will also + // mirror a deletion from the source. If the source is a zero object the destination will be deleted. + SyncConfigMap(destination, source ResourceLocation) error + // SyncSecret indicates that a secret should be copied from the source to the destination. It will also + // mirror a deletion from the source. If the source is a zero object the destination will be deleted. 
+ SyncSecret(destination, source ResourceLocation) error +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go new file mode 100644 index 000000000..02cdedb17 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -0,0 +1,340 @@ +package resourcesynccontroller + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. +// It will also mirror deletions by deleting destinations. +type ResourceSyncController struct { + name string + // syncRuleLock is used to ensure we avoid races on changes to syncing rules + syncRuleLock sync.RWMutex + // configMapSyncRules is a map from destination location to source location + configMapSyncRules syncRules + // secretSyncRules is a map from destination location to source location + secretSyncRules syncRules + + // knownNamespaces is the list of namespaces we are watching. + knownNamespaces sets.String + + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces + operatorConfigClient v1helpers.OperatorClient + + runFn func(ctx context.Context, workers int) + syncCtx factory.SyncContext +} + +var _ ResourceSyncer = &ResourceSyncController{} +var _ factory.Controller = &ResourceSyncController{} + +// NewResourceSyncController creates ResourceSyncController. 
+func NewResourceSyncController( + operatorConfigClient v1helpers.OperatorClient, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + secretsGetter corev1client.SecretsGetter, + configMapsGetter corev1client.ConfigMapsGetter, + eventRecorder events.Recorder, +) *ResourceSyncController { + c := &ResourceSyncController{ + name: "ResourceSyncController", + operatorConfigClient: operatorConfigClient, + + configMapSyncRules: syncRules{}, + secretSyncRules: syncRules{}, + kubeInformersForNamespaces: kubeInformersForNamespaces, + knownNamespaces: kubeInformersForNamespaces.Namespaces(), + + configMapGetter: v1helpers.CachedConfigMapGetter(configMapsGetter, kubeInformersForNamespaces), + secretGetter: v1helpers.CachedSecretGetter(secretsGetter, kubeInformersForNamespaces), + syncCtx: factory.NewSyncContext("ResourceSyncController", eventRecorder.WithComponentSuffix("resource-sync-controller")), + } + + informers := []factory.Informer{ + operatorConfigClient.Informer(), + } + for namespace := range kubeInformersForNamespaces.Namespaces() { + if len(namespace) == 0 { + continue + } + informer := kubeInformersForNamespaces.InformersFor(namespace) + informers = append(informers, informer.Core().V1().ConfigMaps().Informer()) + informers = append(informers, informer.Core().V1().Secrets().Informer()) + } + + f := factory.New().WithSync(c.Sync).WithSyncContext(c.syncCtx).WithInformers(informers...).ResyncEvery(time.Minute).ToController(c.name, eventRecorder.WithComponentSuffix("resource-sync-controller")) + c.runFn = f.Run + + return c +} + +func (c *ResourceSyncController) Run(ctx context.Context, workers int) { + c.runFn(ctx, workers) +} + +func (c *ResourceSyncController) Name() string { + return c.name +} + +func (c *ResourceSyncController) SyncConfigMap(destination, source ResourceLocation) error { + return c.syncConfigMap(destination, source, alwaysFulfilledPreconditions) +} + +func (c *ResourceSyncController) SyncPartialConfigMap(destination ResourceLocation, source ResourceLocation, keys ...string) error { + return c.syncConfigMap(destination, source, alwaysFulfilledPreconditions, keys...) +} + +// SyncConfigMapConditionally adds a new configmap that the resource sync +// controller will synchronise if the given precondition is fulfilled. 
+func (c *ResourceSyncController) SyncConfigMapConditionally(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled) error { + return c.syncConfigMap(destination, source, preconditionsFulfilledFn) +} + +func (c *ResourceSyncController) syncConfigMap(destination ResourceLocation, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled, keys ...string) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.configMapSyncRules[destination] = syncRuleSource{ + ResourceLocation: source, + syncedKeys: sets.NewString(keys...), + preconditionsFulfilledFn: preconditionsFulfilledFn, + } + + // make sure the new rule is picked up + c.syncCtx.Queue().Add(c.syncCtx.QueueKey()) + return nil +} + +func (c *ResourceSyncController) SyncSecret(destination, source ResourceLocation) error { + return c.syncSecret(destination, source, alwaysFulfilledPreconditions) +} + +func (c *ResourceSyncController) SyncPartialSecret(destination, source ResourceLocation, keys ...string) error { + return c.syncSecret(destination, source, alwaysFulfilledPreconditions, keys...) +} + +// SyncSecretConditionally adds a new secret that the resource sync controller +// will synchronise if the given precondition is fulfilled. +func (c *ResourceSyncController) SyncSecretConditionally(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled) error { + return c.syncSecret(destination, source, preconditionsFulfilledFn) +} + +func (c *ResourceSyncController) syncSecret(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled, keys ...string) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.secretSyncRules[destination] = syncRuleSource{ + ResourceLocation: source, + syncedKeys: sets.NewString(keys...), + preconditionsFulfilledFn: preconditionsFulfilledFn, + } + + // make sure the new rule is picked up + c.syncCtx.Queue().Add(c.syncCtx.QueueKey()) + return nil +} + +// errorWithProvider provides a finger of blame in case a source resource cannot be retrieved. 
+func errorWithProvider(provider string, err error) error { + if len(provider) > 0 { + return fmt.Errorf("%w (check the %q that is supposed to provide this resource)", err, provider) + } + return err +} + +func (c *ResourceSyncController) Sync(ctx context.Context, syncCtx factory.SyncContext) error { + operatorSpec, _, _, err := c.operatorConfigClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + c.syncRuleLock.RLock() + defer c.syncRuleLock.RUnlock() + + errors := []error{} + + for destination, source := range c.configMapSyncRules { + // skip the sync if the preconditions aren't fulfilled + if fulfilled, err := source.preconditionsFulfilledFn(); !fulfilled || err != nil { + if err != nil { + errors = append(errors, err) + } + continue + } + + if source.ResourceLocation == emptyResourceLocation { + // use the cache to check whether the configmap exists in target namespace, if not skip the extra delete call. + if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(ctx, destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.configMapGetter.ConfigMaps(destination.Namespace).Delete(ctx, destination.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncPartialConfigMap(ctx, c.configMapGetter, syncCtx.Recorder(), source.Namespace, source.Name, destination.Namespace, destination.Name, source.syncedKeys, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, errorWithProvider(source.Provider, err)) + } + } + for destination, source := range c.secretSyncRules { + // skip the sync if the preconditions aren't fulfilled + if fulfilled, err := source.preconditionsFulfilledFn(); !fulfilled || err != nil { + if err != nil { + errors = append(errors, err) + } + continue + } + + if source.ResourceLocation == emptyResourceLocation { + // use the cache to check whether the secret exists in target namespace, if not skip the extra delete call. 
+ if _, err := c.secretGetter.Secrets(destination.Namespace).Get(ctx, destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.secretGetter.Secrets(destination.Namespace).Delete(ctx, destination.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncPartialSecret(ctx, c.secretGetter, syncCtx.Recorder(), source.Namespace, source.Name, destination.Namespace, destination.Name, source.syncedKeys, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, errorWithProvider(source.Provider, err)) + } + } + + if len(errors) > 0 { + cond := operatorv1.OperatorCondition{ + Type: condition.ResourceSyncControllerDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: v1helpers.NewMultiLineAggregate(errors).Error(), + } + if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil + } + + cond := operatorv1.OperatorCondition{ + Type: condition.ResourceSyncControllerDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil +} + +func NewDebugHandler(controller *ResourceSyncController) http.Handler { + return &debugHTTPHandler{controller: controller} +} + +type debugHTTPHandler struct { + controller *ResourceSyncController +} + +type ResourceSyncRule struct { + Destination ResourceLocation `json:"destination"` + Source syncRuleSource `json:"source"` +} + +type ResourceSyncRuleList []ResourceSyncRule + +func (l ResourceSyncRuleList) Len() int { return len(l) } +func (l ResourceSyncRuleList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l ResourceSyncRuleList) Less(i, j int) bool { + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) < 0 { + return true + } + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) > 0 { + return false + } + if strings.Compare(l[i].Source.Name, l[j].Source.Name) < 0 { + return true + } + return false +} + +type ControllerSyncRules struct { + Secrets ResourceSyncRuleList `json:"secrets"` + Configs ResourceSyncRuleList `json:"configs"` +} + +// ServeSyncRules provides a handler function to return the sync rules of the controller +func (h *debugHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + syncRules := ControllerSyncRules{ResourceSyncRuleList{}, ResourceSyncRuleList{}} + + h.controller.syncRuleLock.RLock() + defer h.controller.syncRuleLock.RUnlock() + syncRules.Secrets = append(syncRules.Secrets, resourceSyncRuleList(h.controller.secretSyncRules)...) + syncRules.Configs = append(syncRules.Configs, resourceSyncRuleList(h.controller.configMapSyncRules)...) 
+ + data, err := json.Marshal(syncRules) + if err != nil { + w.Write([]byte(err.Error())) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Write(data) + w.WriteHeader(http.StatusOK) +} + +func resourceSyncRuleList(syncRules syncRules) ResourceSyncRuleList { + rules := make(ResourceSyncRuleList, 0, len(syncRules)) + for dest, src := range syncRules { + rule := ResourceSyncRule{ + Source: src, + Destination: dest, + } + rules = append(rules, rule) + } + sort.Sort(rules) + return rules +} diff --git a/vendor/github.com/robfig/cron/.gitignore b/vendor/github.com/robfig/cron/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/robfig/cron/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/.travis.yml b/vendor/github.com/robfig/cron/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/robfig/cron/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/LICENSE b/vendor/github.com/robfig/cron/LICENSE new file mode 100644 index 000000000..3a0f627ff --- /dev/null +++ b/vendor/github.com/robfig/cron/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/README.md b/vendor/github.com/robfig/cron/README.md new file mode 100644 index 000000000..ec40c95fc --- /dev/null +++ b/vendor/github.com/robfig/cron/README.md @@ -0,0 +1,6 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Documentation here: https://godoc.org/github.com/robfig/cron diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go new file mode 100644 index 000000000..cd6e7b1be --- /dev/null +++ b/vendor/github.com/robfig/cron/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. 
+type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go new file mode 100644 index 000000000..2318aeb2e --- /dev/null +++ b/vendor/github.com/robfig/cron/cron.go @@ -0,0 +1,259 @@ +package cron + +import ( + "log" + "runtime" + "sort" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + stop chan struct{} + add chan *Entry + snapshot chan []*Entry + running bool + ErrorLog *log.Logger + location *time.Location +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// The Schedule describes a job's duty cycle. +type Schedule interface { + // Return the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // The schedule on which this job should be run. + Schedule Schedule + + // The next time the job will run. This is the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // The last time this job was run. This is the zero time if the job has never + // been run. + Prev time.Time + + // The Job to run. + Job Job +} + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, in the Local time zone. +func New() *Cron { + return NewWithLocation(time.Now().Location()) +} + +// NewWithLocation returns a new Cron job runner. +func NewWithLocation(location *time.Location) *Cron { + return &Cron{ + entries: nil, + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan []*Entry), + running: false, + ErrorLog: nil, + location: location, + } +} + +// A wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +func (c *Cron) AddFunc(spec string, cmd func()) error { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. 
+func (c *Cron) AddJob(spec string, cmd Job) error { + schedule, err := Parse(spec) + if err != nil { + return err + } + c.Schedule(schedule, cmd) + return nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +func (c *Cron) Schedule(schedule Schedule, cmd Job) { + entry := &Entry{ + Schedule: schedule, + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + return + } + + c.add <- entry +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []*Entry { + if c.running { + c.snapshot <- nil + x := <-c.snapshot + return x + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Start the cron scheduler in its own go-routine, or no-op if already started. +func (c *Cron) Start() { + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + if c.running { + return + } + c.running = true + c.run() +} + +func (c *Cron) runWithRecovery(j Job) { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.logf("cron: panic running job: %v\n%s", r, buf) + } + }() + j.Run() +} + +// Run the scheduler. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + go c.runWithRecovery(e.Job) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + + case <-c.snapshot: + c.snapshot <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + return + } + + break + } + } +} + +// Logs an error to stderr or to the configured error log +func (c *Cron) logf(format string, args ...interface{}) { + if c.ErrorLog != nil { + c.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +func (c *Cron) Stop() { + if !c.running { + return + } + c.stop <- struct{}{} + c.running = false +} + +// entrySnapshot returns a copy of the current cron entry list. 
+func (c *Cron) entrySnapshot() []*Entry { + entries := []*Entry{} + for _, e := range c.entries { + entries = append(entries, &Entry{ + Schedule: e.Schedule, + Next: e.Next, + Prev: e.Prev, + Job: e.Job, + }) + } + return entries +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go new file mode 100644 index 000000000..d02ec2f3b --- /dev/null +++ b/vendor/github.com/robfig/cron/doc.go @@ -0,0 +1,129 @@ +/* +Package cron implements a cron spec parser and job runner. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 6 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Seconds | Yes | 0-59 | * / , - + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun", +and "sun" are equally accepted. + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges. For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. + +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 
 1st | 0 0 0 1 1 *
+    @monthly               | Run once a month, midnight, first of month | 0 0 0 1 * *
+    @weekly                | Run once a week, midnight between Sat/Sun  | 0 0 0 * * 0
+    @daily (or @midnight)  | Run once a day, midnight                   | 0 0 0 * * *
+    @hourly                | Run once an hour, beginning of hour        | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+    @every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package (http://www.golang.org/pkg/time).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron
diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go
new file mode 100644
index 000000000..a5e83c0a8
--- /dev/null
+++ b/vendor/github.com/robfig/cron/parser.go
@@ -0,0 +1,380 @@
+package cron
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Configuration options for creating a parser. Most options specify which
+// fields should be included, while others enable features. If a field is not
+// included the parser will assume a default value. These options do not change
+// the order fields are parse in.
+type ParseOption int
+
+const (
+	Second      ParseOption = 1 << iota // Seconds field, default 0
+	Minute                              // Minutes field, default 0
+	Hour                                // Hours field, default 0
+	Dom                                 // Day of month field, default *
+	Month                               // Month field, default *
+	Dow                                 // Day of week field, default *
+	DowOptional                         // Optional day of week field, default *
+	Descriptor                          // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+	Second,
+	Minute,
+	Hour,
+	Dom,
+	Month,
+	Dow,
+}
+
+var defaults = []string{
+	"0",
+	"0",
+	"0",
+	"*",
+	"*",
+	"*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+	options   ParseOption
+	optionals int
+}
+
+// Creates a custom Parser with custom options.
+// +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") +// +// // Same as above, just excludes time fields +// subsParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") +// +// // Same as above, just makes Dow optional +// subsParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") +// +func NewParser(options ParseOption) Parser { + optionals := 0 + if options&DowOptional > 0 { + options |= Dow + optionals++ + } + return Parser{options, optionals} +} + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// It accepts crontab specs and features configured by NewParser. +func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("Empty spec string") + } + if spec[0] == '@' && p.options&Descriptor > 0 { + return parseDescriptor(spec) + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if p.options&place > 0 { + max++ + } + } + min := max - p.optionals + + // Split fields on whitespace + fields := strings.Fields(spec) + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec) + } + return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec) + } + + // Fill in missing fields + fields = expandFields(fields, p.options) + + var err error + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + }, nil +} + +func expandFields(fields []string, options ParseOption) []string { + n := 0 + count := len(fields) + expFields := make([]string, len(places)) + copy(expFields, defaults) + for i, place := range places { + if options&place > 0 { + expFields[i] = fields[n] + n++ + } + if n == count { + break + } + } + return expFields +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given standardSpec +// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always +// pass 5 entries representing: minute, hour, day of month, month and day of week, +// in that order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +var defaultParser = NewParser( + Second | Minute | Hour | Dom | Month | DowOptional | Descriptor, +) + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Full crontab specs, e.g. "* * * * * ?" +// - Descriptors, e.g. 
"@midnight", "@every 1h30m" +func Parse(spec string) (Schedule, error) { + return defaultParser.Parse(spec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("Too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + default: + return 0, fmt.Errorf("Too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("Step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
+func parseDescriptor(descriptor string) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/spec.go b/vendor/github.com/robfig/cron/spec.go new file mode 100644 index 000000000..aac9a60b9 --- /dev/null +++ b/vendor/github.com/robfig/cron/spec.go @@ -0,0 +1,158 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach: + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Start at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. 
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
new file mode 100644
index 000000000..3d87fd72c
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package user contains utilities for dealing with simple user exchange in the auth
+// packages. The user.Info interface defines an interface for exchanging that info.
+package user // import "k8s.io/apiserver/pkg/authentication/user"
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
new file mode 100644
index 000000000..4d6ec0980
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+// Info describes a user that has been authenticated to the system.
+type Info interface {
+	// GetName returns the name that uniquely identifies this user among all
+	// other active users.
+	GetName() string
+	// GetUID returns a unique value for a particular user that will change
+	// if the user is removed from the system and another user is added with
+	// the same name.
+	GetUID() string
+	// GetGroups returns the names of the groups the user is a member of
+	GetGroups() []string
+
+	// GetExtra can contain any additional information that the authenticator
+	// thought was interesting. One example would be scopes on a token.
+	// Keys in this map should be namespaced to the authenticator or
+	// authenticator/authorizer pair making use of them.
+	// For instance: "example.org/foo" instead of "foo"
+	// This is a map[string][]string because it needs to be serializeable into
+	// a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization
+	// delegation flows
+	// In order to faithfully round-trip through an impersonation flow, these keys
+	// MUST be lowercase.
+	GetExtra() map[string][]string
+}
+
+// DefaultInfo provides a simple user information exchange object
+// for components that implement the UserInfo interface.
+type DefaultInfo struct { + Name string + UID string + Groups []string + Extra map[string][]string +} + +func (i *DefaultInfo) GetName() string { + return i.Name +} + +func (i *DefaultInfo) GetUID() string { + return i.UID +} + +func (i *DefaultInfo) GetGroups() []string { + return i.Groups +} + +func (i *DefaultInfo) GetExtra() map[string][]string { + return i.Extra +} + +// well-known user and group names +const ( + SystemPrivilegedGroup = "system:masters" + NodesGroup = "system:nodes" + MonitoringGroup = "system:monitoring" + AllUnauthenticated = "system:unauthenticated" + AllAuthenticated = "system:authenticated" + + Anonymous = "system:anonymous" + APIServerUser = "system:apiserver" + + // core kubernetes process identities + KubeProxy = "system:kube-proxy" + KubeControllerManager = "system:kube-controller-manager" + KubeScheduler = "system:kube-scheduler" +) diff --git a/vendor/modules.txt b/vendor/modules.txt index 94d56feab..6d06bd5b1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -678,17 +678,24 @@ github.com/openshift/client-go/machine/informers/externalversions/machine/v1 github.com/openshift/client-go/machine/informers/externalversions/machine/v1beta1 github.com/openshift/client-go/machine/listers/machine/v1 github.com/openshift/client-go/machine/listers/machine/v1beta1 -# github.com/openshift/library-go v0.0.0-20230508110756-9b7abe2c9cbf +# github.com/openshift/library-go v0.0.0-20230706195801-561433066966 ## explicit; go 1.20 github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers github.com/openshift/library-go/pkg/config/clusterstatus github.com/openshift/library-go/pkg/config/leaderelection +github.com/openshift/library-go/pkg/controller/factory +github.com/openshift/library-go/pkg/crypto +github.com/openshift/library-go/pkg/operator/condition +github.com/openshift/library-go/pkg/operator/configobserver +github.com/openshift/library-go/pkg/operator/configobserver/featuregates github.com/openshift/library-go/pkg/operator/events +github.com/openshift/library-go/pkg/operator/management github.com/openshift/library-go/pkg/operator/resource/resourceapply github.com/openshift/library-go/pkg/operator/resource/resourcehash github.com/openshift/library-go/pkg/operator/resource/resourcehelper github.com/openshift/library-go/pkg/operator/resource/resourcemerge github.com/openshift/library-go/pkg/operator/resource/resourceread +github.com/openshift/library-go/pkg/operator/resourcesynccontroller github.com/openshift/library-go/pkg/operator/v1helpers # github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b ## explicit @@ -763,6 +770,9 @@ github.com/quasilyte/stdinfo # github.com/rivo/uniseg v0.4.2 ## explicit; go 1.18 github.com/rivo/uniseg +# github.com/robfig/cron v1.2.0 +## explicit +github.com/robfig/cron # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 @@ -1328,6 +1338,7 @@ k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/apiserver v0.27.2 ## explicit; go 1.20 +k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/storage/names # k8s.io/cli-runtime v0.27.1 ## explicit; go 1.20 From ce3fe45bfbc5cff007255778c850b6ab94ef7182 Mon Sep 17 00:00:00 2001 From: Marc Sluiter Date: Wed, 12 Jul 2023 00:21:31 +0200 Subject: [PATCH 4/4] Use feature gate accessor Signed-off-by: Marc Sluiter --- cmd/machine-api-operator/start.go | 20 +- .../0000_30_machine-api-operator_09_rbac.yaml | 19 ++ 
...30_machine-api-operator_11_deployment.yaml | 4 + pkg/operator/config.go | 2 +- pkg/operator/config_test.go | 2 +- pkg/operator/operator.go | 91 ++++++-- pkg/operator/operator_test.go | 211 ++++++++---------- pkg/operator/sync_test.go | 6 +- pkg/util/featuregates/featuregates.go | 36 --- 9 files changed, 202 insertions(+), 189 deletions(-) delete mode 100644 pkg/util/featuregates/featuregates.go diff --git a/cmd/machine-api-operator/start.go b/cmd/machine-api-operator/start.go index 4b4038503..7e39ae8b0 100644 --- a/cmd/machine-api-operator/start.go +++ b/cmd/machine-api-operator/start.go @@ -10,6 +10,7 @@ import ( "strconv" osconfigv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/machine-api-operator/pkg/metrics" "github.com/openshift/machine-api-operator/pkg/operator" "github.com/openshift/machine-api-operator/pkg/util" @@ -120,7 +121,7 @@ func initMachineAPIInformers(ctx *ControllerContext) { klog.Info("Synced up machine api informer caches") } -func initRecorder(kubeClient kubernetes.Interface) (record.EventRecorder, error) { +func initEventRecorder(kubeClient kubernetes.Interface) (record.EventRecorder, error) { eventRecorderScheme := runtime.NewScheme() if err := osconfigv1.Install(eventRecorderScheme); err != nil { return nil, fmt.Errorf("failed to create event recorder scheme: %v", err) @@ -131,12 +132,25 @@ func initRecorder(kubeClient kubernetes.Interface) (record.EventRecorder, error) return eventBroadcaster.NewRecorder(eventRecorderScheme, v1.EventSource{Component: "machineapioperator"}), nil } +func initRecorder(kubeClient kubernetes.Interface) (events.Recorder, error) { + controllerRef, err := events.GetControllerReferenceForCurrentPod(context.Background(), kubeClient, componentNamespace, nil) + if err != nil { + return nil, fmt.Errorf("failed to create controller ref for recorder: %v", err) + } + recorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(componentNamespace), "machineapioperator", controllerRef) + return recorder, nil +} + func startControllersOrDie(ctx *ControllerContext) { kubeClient := ctx.ClientBuilder.KubeClientOrDie(componentName) - recorder, err := initRecorder(kubeClient) + eventRecorder, err := initEventRecorder(kubeClient) if err != nil { klog.Fatalf("failed to create event recorder: %v", err) } + recorder, err := initRecorder(kubeClient) + if err != nil { + klog.Fatalf("failed to create recorder: %v", err) + } optr, err := operator.New( componentNamespace, componentName, startOpts.imagesFile, @@ -144,6 +158,7 @@ func startControllersOrDie(ctx *ControllerContext) { ctx.KubeNamespacedInformerFactory.Apps().V1().Deployments(), ctx.KubeNamespacedInformerFactory.Apps().V1().DaemonSets(), ctx.ConfigInformerFactory.Config().V1().FeatureGates(), + ctx.ConfigInformerFactory.Config().V1().ClusterVersions(), ctx.KubeNamespacedInformerFactory.Admissionregistration().V1().ValidatingWebhookConfigurations(), ctx.KubeNamespacedInformerFactory.Admissionregistration().V1().MutatingWebhookConfigurations(), ctx.ConfigInformerFactory.Config().V1().Proxies(), @@ -151,6 +166,7 @@ func startControllersOrDie(ctx *ControllerContext) { ctx.ClientBuilder.OpenshiftClientOrDie(componentName), ctx.ClientBuilder.MachineClientOrDie(componentName), ctx.ClientBuilder.DynamicClientOrDie(componentName), + eventRecorder, recorder, ) if err != nil { diff --git a/install/0000_30_machine-api-operator_09_rbac.yaml b/install/0000_30_machine-api-operator_09_rbac.yaml index d38627024..432e84bcf 100644 --- 
a/install/0000_30_machine-api-operator_09_rbac.yaml +++ b/install/0000_30_machine-api-operator_09_rbac.yaml @@ -314,6 +314,24 @@ rules: - patch - delete + - apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch + + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: - machine.openshift.io resources: @@ -394,6 +412,7 @@ rules: - featuregates - featuregates/status - proxies + - clusterversions verbs: - get - list diff --git a/install/0000_30_machine-api-operator_11_deployment.yaml b/install/0000_30_machine-api-operator_11_deployment.yaml index e21716ca1..5d71ecd2c 100644 --- a/install/0000_30_machine-api-operator_11_deployment.yaml +++ b/install/0000_30_machine-api-operator_11_deployment.yaml @@ -68,6 +68,10 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name - name: METRICS_PORT value: "8080" resources: diff --git a/pkg/operator/config.go b/pkg/operator/config.go index b61919d66..425ffc470 100644 --- a/pkg/operator/config.go +++ b/pkg/operator/config.go @@ -75,7 +75,7 @@ func getImagesFromJSONFile(filePath string) (*Images, error) { return &i, nil } -func getProviderControllerFromImages(platform configv1.PlatformType, featureGate *configv1.FeatureGate, images Images) (string, error) { +func getProviderControllerFromImages(platform configv1.PlatformType, images Images) (string, error) { switch platform { case configv1.AWSPlatformType: return images.ClusterAPIControllerAWS, nil diff --git a/pkg/operator/config_test.go b/pkg/operator/config_test.go index 191afe994..fe22d3700 100644 --- a/pkg/operator/config_test.go +++ b/pkg/operator/config_test.go @@ -350,7 +350,7 @@ func TestGetProviderControllerFromImages(t *testing.T) { } for _, test := range tests { - res, err := getProviderControllerFromImages(test.provider, &test.featureGate, *img) + res, err := getProviderControllerFromImages(test.provider, *img) if err != nil { t.Errorf("failed getProviderControllerFromImages: %v", err) } diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 2982b7bf0..bad9f7863 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -2,6 +2,7 @@ package operator import ( "context" + "errors" "fmt" "os" "time" @@ -12,8 +13,9 @@ import ( configinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" machineclientset "github.com/openshift/client-go/machine/clientset/versioned" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" - "github.com/openshift/machine-api-operator/pkg/util/featuregates" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -39,6 +41,9 @@ const ( // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s maxRetries = 15 maoOwnedAnnotation = "machine.openshift.io/owned" + + releaseVersionEnvVariableName = "RELEASE_VERSION" + releaseVersionUnknownValue = "unknown" ) // Operator defines machine api operator. 
@@ -53,6 +58,7 @@ type Operator struct { machineClient machineclientset.Interface dynamicClient dynamic.Interface eventRecorder record.EventRecorder + recorder events.Recorder syncHandler func(ic string) (reconcile.Result, error) @@ -71,8 +77,7 @@ type Operator struct { mutatingWebhookLister admissionlisterv1.MutatingWebhookConfigurationLister mutatingWebhookListerSynced cache.InformerSynced - featureGateLister configlistersv1.FeatureGateLister - featureGateCacheSynced cache.InformerSynced + featureGateAccessor featuregates.FeatureGateAccess // queue only ever has one item, but it has nice error handling backoff/retry semantics queue workqueue.RateLimitingInterface @@ -91,6 +96,7 @@ func New( deployInformer appsinformersv1.DeploymentInformer, daemonsetInformer appsinformersv1.DaemonSetInformer, featureGateInformer configinformersv1.FeatureGateInformer, + clusterVersionInformer configinformersv1.ClusterVersionInformer, validatingWebhookInformer admissioninformersv1.ValidatingWebhookConfigurationInformer, mutatingWebhookInformer admissioninformersv1.MutatingWebhookConfigurationInformer, proxyInformer configinformersv1.ProxyInformer, @@ -99,13 +105,18 @@ func New( machineClient machineclientset.Interface, dynamicClient dynamic.Interface, - recorder record.EventRecorder, + eventRecorder record.EventRecorder, + recorder events.Recorder, ) (*Operator, error) { // we must report the version from the release payload when we report available at that level // TODO we will report the version of the operands (so our machine api implementation version) operandVersions := []osconfigv1.OperandVersion{} - if releaseVersion := os.Getenv("RELEASE_VERSION"); len(releaseVersion) > 0 { + releaseVersion := os.Getenv(releaseVersionEnvVariableName) + if len(releaseVersion) > 0 { operandVersions = append(operandVersions, osconfigv1.OperandVersion{Name: "operator", Version: releaseVersion}) + } else { + klog.Infof("%s environment variable is missing, defaulting to %q", releaseVersionEnvVariableName, releaseVersionUnknownValue) + releaseVersion = releaseVersionUnknownValue } optr := &Operator{ @@ -116,7 +127,8 @@ func New( osClient: osClient, machineClient: machineClient, dynamicClient: dynamicClient, - eventRecorder: recorder, + eventRecorder: eventRecorder, + recorder: recorder, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineapioperator"), operandVersions: operandVersions, } @@ -137,10 +149,36 @@ func New( if err != nil { return nil, fmt.Errorf("error adding event handler to mutatingwebhook informer: %v", err) } - _, err = featureGateInformer.Informer().AddEventHandler(optr.eventHandler()) - if err != nil { - return nil, fmt.Errorf("error adding event handler to featuregates informer: %v", err) - } + + desiredVersion := releaseVersion + missingVersion := "0.0.1-snapshot" + featureGateAccessor := featuregates.NewFeatureGateAccess( + desiredVersion, missingVersion, + clusterVersionInformer, featureGateInformer, + recorder, + ) + featureGateAccessor.SetChangeHandler(func(featureChange featuregates.FeatureChange) { + if featureChange.Previous == nil { + // When the initial featuregate is set, the previous version is nil. + // Nothing to do in this case, it's handled by the 1st sync, which only runs after the initial feature set was received. 
+ return + } + + klog.V(4).InfoS("FeatureGates changed", "enabled", featureChange.New.Enabled, "disabled", featureChange.New.Disabled) + prevDisableMHC := featuregates.NewFeatureGate(featureChange.Previous.Enabled, featureChange.Previous.Disabled). + Enabled(osconfigv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController) + newDisableMHC := featuregates.NewFeatureGate(featureChange.New.Enabled, featureChange.New.Disabled). + Enabled(osconfigv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController) + + if prevDisableMHC != newDisableMHC { + klog.V(2).InfoS("Resync for modified feature gate", + "FeatureGateMachineAPIOperatorDisableMachineHealthCheckController enabled", newDisableMHC, + ) + workQueueKey := fmt.Sprintf("%s/%s", optr.namespace, optr.name) + optr.queue.Add(workQueueKey) + } + }) + optr.featureGateAccessor = featureGateAccessor optr.config = config optr.syncHandler = optr.sync @@ -160,9 +198,6 @@ func New( optr.mutatingWebhookLister = mutatingWebhookInformer.Lister() optr.mutatingWebhookListerSynced = mutatingWebhookInformer.Informer().HasSynced - optr.featureGateLister = featureGateInformer.Lister() - optr.featureGateCacheSynced = featureGateInformer.Informer().HasSynced - return optr, nil } @@ -179,12 +214,24 @@ func (optr *Operator) Run(workers int, stopCh <-chan struct{}) { optr.validatingWebhookListerSynced, optr.deployListerSynced, optr.daemonsetListerSynced, - optr.proxyListerSynced, - optr.featureGateCacheSynced) { + optr.proxyListerSynced) { klog.Error("Failed to sync caches") return } klog.Info("Synced up caches") + + ctx, cancelFeatureGateAccessor := context.WithCancel(context.Background()) + defer cancelFeatureGateAccessor() + go optr.featureGateAccessor.Run(ctx) + klog.Info("Started feature gate accessor") + select { + case <-optr.featureGateAccessor.InitialFeatureGatesObserved(): + klog.V(4).Info("FeatureGates initialized") + case <-time.After(1 * time.Minute): + klog.Error(errors.New("timed out waiting for FeatureGate detection"), "unable to start operator") + return + } + for i := 0; i < workers; i++ { go wait.Until(optr.worker, time.Second, stopCh) } @@ -369,12 +416,7 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { return nil, err } - featureGate, err := optr.osClient.ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{}) - if err != nil { - return nil, err - } - - providerControllerImage, err := getProviderControllerFromImages(provider, featureGate, *images) + providerControllerImage, err := getProviderControllerFromImages(provider, *images) if err != nil { return nil, err } @@ -401,7 +443,12 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { // in case the MHC controller is disabled, leave its image empty mhcImage := machineAPIOperatorImage - if !featuregates.IsDeployMHCControllerEnabled(featureGate) { + featureGates, err := optr.featureGateAccessor.CurrentFeatureGates() + if err != nil { + return nil, err + } + if featureGates.Enabled(osconfigv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController) { + klog.V(2).Info("Disabling MHC controller") mhcImage = "" } diff --git a/pkg/operator/operator_test.go b/pkg/operator/operator_test.go index 0508b4f9e..32b2d0e6d 100644 --- a/pkg/operator/operator_test.go +++ b/pkg/operator/operator_test.go @@ -15,8 +15,8 @@ import ( fakeos "github.com/openshift/client-go/config/clientset/versioned/fake" configinformersv1 "github.com/openshift/client-go/config/informers/externalversions" fakemachine 
"github.com/openshift/client-go/machine/clientset/versioned/fake" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" - "github.com/openshift/machine-api-operator/pkg/util/featuregates" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -38,31 +38,51 @@ const ( releaseVersion = "0.0.0.test-unit" ) -func newFakeOperator(kubeObjects, osObjects, machineObjects []runtime.Object, imagesFile string, stopCh <-chan struct{}) (*Operator, error) { +func newFakeOperator(kubeObjects, osObjects, machineObjects []runtime.Object, imagesFile string, fg *openshiftv1.FeatureGate, stopCh <-chan struct{}) (*Operator, error) { kubeClient := fakekube.NewSimpleClientset(kubeObjects...) osClient := fakeos.NewSimpleClientset(osObjects...) machineClient := fakemachine.NewSimpleClientset(machineObjects...) dynamicClient := fakedynamic.NewSimpleDynamicClient(scheme.Scheme, kubeObjects...) kubeNamespacedSharedInformer := informers.NewSharedInformerFactoryWithOptions(kubeClient, 2*time.Minute, informers.WithNamespace(targetNamespace)) configSharedInformer := configinformersv1.NewSharedInformerFactoryWithOptions(osClient, 2*time.Minute) - featureGateInformer := configSharedInformer.Config().V1().FeatureGates() deployInformer := kubeNamespacedSharedInformer.Apps().V1().Deployments() proxyInformer := configSharedInformer.Config().V1().Proxies() daemonsetInformer := kubeNamespacedSharedInformer.Apps().V1().DaemonSets() mutatingWebhookInformer := kubeNamespacedSharedInformer.Admissionregistration().V1().MutatingWebhookConfigurations() validatingWebhookInformer := kubeNamespacedSharedInformer.Admissionregistration().V1().ValidatingWebhookConfigurations() + if fg == nil { + fg = &openshiftv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Status: openshiftv1.FeatureGateStatus{ + FeatureGates: []openshiftv1.FeatureGateDetails{ + { + Version: "", + Enabled: []openshiftv1.FeatureGateAttributes{}, + Disabled: []openshiftv1.FeatureGateAttributes{{Name: openshiftv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController}}, + }, + }, + }, + } + } + featureGateAccessor, err := featuregates.NewHardcodedFeatureGateAccessFromFeatureGate(fg, "") + if err != nil { + return nil, fmt.Errorf("error adding event handler to deployments informer: %v", err) + } + optr := &Operator{ kubeClient: kubeClient, osClient: osClient, machineClient: machineClient, dynamicClient: dynamicClient, - featureGateLister: featureGateInformer.Lister(), deployLister: deployInformer.Lister(), proxyLister: proxyInformer.Lister(), daemonsetLister: daemonsetInformer.Lister(), mutatingWebhookLister: mutatingWebhookInformer.Lister(), validatingWebhookLister: validatingWebhookInformer.Lister(), + featureGateAccessor: featureGateAccessor, imagesFile: imagesFile, namespace: targetNamespace, eventRecorder: record.NewFakeRecorder(50), @@ -70,7 +90,6 @@ func newFakeOperator(kubeObjects, osObjects, machineObjects []runtime.Object, im deployListerSynced: deployInformer.Informer().HasSynced, proxyListerSynced: proxyInformer.Informer().HasSynced, daemonsetListerSynced: daemonsetInformer.Informer().HasSynced, - featureGateCacheSynced: featureGateInformer.Informer().HasSynced, cache: resourceapply.NewResourceCache(), mutatingWebhookListerSynced: mutatingWebhookInformer.Informer().HasSynced, validatingWebhookListerSynced: validatingWebhookInformer.Informer().HasSynced, @@ -80,14 +99,10 @@ func 
newFakeOperator(kubeObjects, osObjects, machineObjects []runtime.Object, im kubeNamespacedSharedInformer.Start(stopCh) optr.syncHandler = optr.sync - _, err := deployInformer.Informer().AddEventHandler(optr.eventHandlerDeployments()) + _, err = deployInformer.Informer().AddEventHandler(optr.eventHandlerDeployments()) if err != nil { return nil, fmt.Errorf("error adding event handler to deployments informer: %v", err) } - _, err = featureGateInformer.Informer().AddEventHandler(optr.eventHandler()) - if err != nil { - return nil, fmt.Errorf("error adding event handler to featuregate informer: %v", err) - } optr.operandVersions = []openshiftv1.OperandVersion{ {Name: "operator", Version: releaseVersion}, @@ -179,17 +194,6 @@ func TestOperatorSync_NoOp(t *testing.T) { }, } - featureGate := &openshiftv1.FeatureGate{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - }, - Spec: openshiftv1.FeatureGateSpec{ - FeatureGateSelection: openshiftv1.FeatureGateSelection{ - FeatureSet: openshiftv1.Default, - }, - }, - } - proxy := &openshiftv1.Proxy{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", @@ -198,7 +202,7 @@ func TestOperatorSync_NoOp(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - optr, err := newFakeOperator(nil, []runtime.Object{infra, featureGate, proxy}, nil, imagesJSONFile, stopCh) + optr, err := newFakeOperator(nil, []runtime.Object{infra, proxy}, nil, imagesJSONFile, nil, stopCh) if err != nil { t.Fatal(err) } @@ -327,17 +331,6 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, } - featureGate := &openshiftv1.FeatureGate{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - }, - Spec: openshiftv1.FeatureGateSpec{ - FeatureGateSelection: openshiftv1.FeatureGateSelection{ - FeatureSet: openshiftv1.Default, - }, - }, - } - proxy := &openshiftv1.Proxy{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster", @@ -355,11 +348,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { expectedError error }{ { - name: string(openshiftv1.AWSPlatformType), - platform: openshiftv1.AWSPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.AWSPlatformType), + platform: openshiftv1.AWSPlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -375,11 +367,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.AlibabaCloudPlatformType), - platform: openshiftv1.AlibabaCloudPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.AlibabaCloudPlatformType), + platform: openshiftv1.AlibabaCloudPlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -395,11 +386,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.LibvirtPlatformType), - platform: openshiftv1.LibvirtPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.LibvirtPlatformType), + platform: openshiftv1.LibvirtPlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -415,11 +405,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.OpenStackPlatformType), - platform: openshiftv1.OpenStackPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.OpenStackPlatformType), + platform: openshiftv1.OpenStackPlatformType, + 
infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -435,11 +424,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.AzurePlatformType), - platform: openshiftv1.AzurePlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.AzurePlatformType), + platform: openshiftv1.AzurePlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -455,11 +443,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.BareMetalPlatformType), - platform: openshiftv1.BareMetalPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.BareMetalPlatformType), + platform: openshiftv1.BareMetalPlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -475,11 +462,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.GCPPlatformType), - platform: openshiftv1.GCPPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.GCPPlatformType), + platform: openshiftv1.GCPPlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -495,11 +481,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(kubemarkPlatform), - platform: kubemarkPlatform, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(kubemarkPlatform), + platform: kubemarkPlatform, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -515,11 +500,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.VSpherePlatformType), - platform: openshiftv1.VSpherePlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.VSpherePlatformType), + platform: openshiftv1.VSpherePlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -535,11 +519,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.OvirtPlatformType), - platform: openshiftv1.OvirtPlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.OvirtPlatformType), + platform: openshiftv1.OvirtPlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -555,11 +538,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: string(openshiftv1.NonePlatformType), - platform: openshiftv1.NonePlatformType, - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: string(openshiftv1.NonePlatformType), + platform: openshiftv1.NonePlatformType, + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -584,13 +566,12 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, - Spec: openshiftv1.FeatureGateSpec{ - FeatureGateSelection: openshiftv1.FeatureGateSelection{ - FeatureSet: openshiftv1.CustomNoUpgrade, - CustomNoUpgrade: &openshiftv1.CustomFeatureGates{ - Enabled: []openshiftv1.FeatureGateName{ - 
openshiftv1.FeatureGateName(featuregates.DeployMHCControllerFeatureGateName), - }, + Status: openshiftv1.FeatureGateStatus{ + FeatureGates: []openshiftv1.FeatureGateDetails{ + { + Version: "", + Enabled: []openshiftv1.FeatureGateAttributes{}, + Disabled: []openshiftv1.FeatureGateAttributes{{Name: openshiftv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController}}, }, }, }, @@ -618,13 +599,12 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "cluster", }, - Spec: openshiftv1.FeatureGateSpec{ - FeatureGateSelection: openshiftv1.FeatureGateSelection{ - FeatureSet: openshiftv1.CustomNoUpgrade, - CustomNoUpgrade: &openshiftv1.CustomFeatureGates{ - Disabled: []openshiftv1.FeatureGateName{ - openshiftv1.FeatureGateName(featuregates.DeployMHCControllerFeatureGateName), - }, + Status: openshiftv1.FeatureGateStatus{ + FeatureGates: []openshiftv1.FeatureGateDetails{ + { + Version: "", + Enabled: []openshiftv1.FeatureGateAttributes{{Name: openshiftv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController}}, + Disabled: []openshiftv1.FeatureGateAttributes{}, }, }, }, @@ -645,11 +625,10 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { }, }, { - name: "bad-platform", - platform: "bad-platform", - infra: infra, - featureGate: featureGate, - proxy: proxy, + name: "bad-platform", + platform: "bad-platform", + infra: infra, + proxy: proxy, expectedConfig: &OperatorConfig{ TargetNamespace: targetNamespace, Proxy: proxy, @@ -668,25 +647,14 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name: "no-infra", platform: "no-infra", infra: nil, - featureGate: featureGate, proxy: proxy, expectedConfig: nil, expectedError: kerrors.NewNotFound(schema.GroupResource{Group: "config.openshift.io", Resource: "infrastructures"}, "cluster"), }, - { - name: "no-featuregate", - platform: "no-featuregate", - infra: infra, - featureGate: nil, - proxy: proxy, - expectedConfig: nil, - expectedError: kerrors.NewNotFound(schema.GroupResource{Group: "config.openshift.io", Resource: "featuregates"}, "cluster"), - }, { name: "no-proxy", platform: "no-proxy", infra: infra, - featureGate: featureGate, proxy: nil, expectedConfig: nil, expectedError: kerrors.NewNotFound(schema.GroupResource{Group: "config.openshift.io", Resource: "proxies"}, "cluster"), @@ -695,7 +663,6 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name: "no-platform", platform: "", infra: infra, - featureGate: featureGate, proxy: proxy, expectedConfig: nil, expectedError: errors.New("no platform provider found on install config"), @@ -704,7 +671,6 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { name: "no-images-file", platform: openshiftv1.NonePlatformType, infra: infra, - featureGate: featureGate, proxy: proxy, imagesFile: "fixtures/not-found.json", expectedConfig: nil, @@ -731,9 +697,6 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { inf.Status.PlatformStatus = &openshiftv1.PlatformStatus{Type: tc.platform} objects = append(objects, inf) } - if tc.featureGate != nil { - objects = append(objects, tc.featureGate.DeepCopy()) - } if tc.proxy != nil { proxy := tc.proxy.DeepCopy() objects = append(objects, proxy) @@ -741,7 +704,7 @@ func TestMAOConfigFromInfrastructure(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - optr, err := newFakeOperator(nil, objects, nil, imagesJSONFile, stopCh) + optr, err := newFakeOperator(nil, objects, nil, imagesJSONFile, tc.featureGate, stopCh) if err != nil { t.Fatal(err) } diff --git a/pkg/operator/sync_test.go 
b/pkg/operator/sync_test.go index cadcf2970..57830b707 100644 --- a/pkg/operator/sync_test.go +++ b/pkg/operator/sync_test.go @@ -131,7 +131,7 @@ func TestCheckDeploymentRolloutStatus(t *testing.T) { t.Run(tc.name, func(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - optr, err := newFakeOperator([]runtime.Object{tc.deployment}, nil, nil, imagesJSONFile, stopCh) + optr, err := newFakeOperator([]runtime.Object{tc.deployment}, nil, nil, imagesJSONFile, nil, stopCh) if err != nil { t.Fatal(err) } @@ -434,7 +434,7 @@ func TestCheckMinimumWorkerMachines(t *testing.T) { machineObjects = append(machineObjects, tc.machineSets...) machineObjects = append(machineObjects, tc.machines...) - optr, err := newFakeOperator(nil, nil, machineObjects, imagesJSONFile, stopCh) + optr, err := newFakeOperator(nil, nil, machineObjects, imagesJSONFile, nil, stopCh) if err != nil { t.Fatal(err) } @@ -478,7 +478,7 @@ func TestSyncWebhookConfiguration(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - optr, err := newFakeOperator(nil, nil, nil, "", stopCh) + optr, err := newFakeOperator(nil, nil, nil, "", nil, stopCh) if err != nil { t.Fatal(err) } diff --git a/pkg/util/featuregates/featuregates.go b/pkg/util/featuregates/featuregates.go deleted file mode 100644 index b89bc81ab..000000000 --- a/pkg/util/featuregates/featuregates.go +++ /dev/null @@ -1,36 +0,0 @@ -package featuregates - -import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/klog/v2" -) - -const ( - // DeployMHCControllerFeatureGateName is the name of the feature gate for enabling the MHC controller - DeployMHCControllerFeatureGateName = "MachineAPIOperatorDeployMHCController" -) - -// IsDeployMHCControllerEnabled returns if the feature gate for the MHC controller deployment is enabled. -// For now this is an experimental feature gate, and we only check if it's disabled via the CustomNoUpgrade feature set. -// The purpose is to disable the MHC controller for being able to test the upcoming MHC feature of the NodeHealthCheck operator. -// Whenever NHC becomes the default MHC handler, the default return value needs to be changed to false! -func IsDeployMHCControllerEnabled(fg *v1.FeatureGate) bool { - deployMHCControllerFeatureGate := v1.FeatureGateName(DeployMHCControllerFeatureGateName) - if fg != nil && fg.Spec.CustomNoUpgrade != nil { - for _, enabled := range fg.Spec.CustomNoUpgrade.Enabled { - if enabled == deployMHCControllerFeatureGate { - klog.V(2).Info("MHC controller enabled by feature gate") - return true - } - } - for _, disabled := range fg.Spec.CustomNoUpgrade.Disabled { - if disabled == deployMHCControllerFeatureGate { - klog.V(2).Info("MHC controller disabled by feature gate") - return false - } - } - } - // switch to false once NHC is the default! - klog.V(4).Info("MHC controller enabled (default)") - return true -}
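
For reference, the pieces above compose as follows. This is a minimal, consolidated sketch of the new feature-gate wiring, not the operator's exact code: helper names such as buildAccessor, deployMHC and newTestAccessor are illustrative, while the library-go featuregates API and the FeatureGateMachineAPIOperatorDisableMachineHealthCheckController gate are the ones used in the hunks above.

package featuregatesketch

import (
	"context"
	"fmt"
	"time"

	configv1 "github.com/openshift/api/config/v1"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
	"github.com/openshift/library-go/pkg/operator/events"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildAccessor mirrors the construction added in New(): the accessor is driven by the
// ClusterVersion and FeatureGate informers; desiredVersion comes from the release version
// environment variable and "0.0.1-snapshot" is the fallback when no version is known.
func buildAccessor(ci configinformers.SharedInformerFactory, recorder events.Recorder, desiredVersion string) featuregates.FeatureGateAccess {
	return featuregates.NewFeatureGateAccess(
		desiredVersion, "0.0.1-snapshot",
		ci.Config().V1().ClusterVersions(),
		ci.Config().V1().FeatureGates(),
		recorder,
	)
}

// deployMHC runs the accessor, waits for the first FeatureGate observation (as Run() now
// does before starting workers) and reports whether the MHC controller image should be set,
// i.e. whether the disable gate is off.
func deployMHC(ctx context.Context, access featuregates.FeatureGateAccess) (bool, error) {
	go access.Run(ctx)

	select {
	case <-access.InitialFeatureGatesObserved():
		// initial feature set received, safe to query CurrentFeatureGates
	case <-time.After(1 * time.Minute):
		return false, fmt.Errorf("timed out waiting for FeatureGate detection")
	}

	gates, err := access.CurrentFeatureGates()
	if err != nil {
		return false, err
	}
	return !gates.Enabled(configv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController), nil
}

// newTestAccessor shows the test-side shortcut used by newFakeOperator: a FeatureGate object
// with a populated Status is wrapped into a static accessor, so unit tests bypass the
// informer-driven path entirely.
func newTestAccessor(disableMHC bool) (featuregates.FeatureGateAccess, error) {
	attrs := []configv1.FeatureGateAttributes{
		{Name: configv1.FeatureGateMachineAPIOperatorDisableMachineHealthCheckController},
	}
	fg := &configv1.FeatureGate{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Status: configv1.FeatureGateStatus{
			// the empty Version matches the empty desiredVersion passed below
			FeatureGates: []configv1.FeatureGateDetails{{}},
		},
	}
	if disableMHC {
		fg.Status.FeatureGates[0].Enabled = attrs
	} else {
		fg.Status.FeatureGates[0].Disabled = attrs
	}
	return featuregates.NewHardcodedFeatureGateAccessFromFeatureGate(fg, "")
}

Note that the change handler registered in New() only re-queues the operator key when the value of this single gate flips between observations, so unrelated FeatureGate updates do not trigger a resync.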