diff --git a/commands/kubernetes.go b/commands/kubernetes.go index 154b6433d..08470a8c1 100644 --- a/commands/kubernetes.go +++ b/commands/kubernetes.go @@ -286,7 +286,7 @@ After creating a cluster, a configuration context is added to kubectl and made a AddBoolFlag(cmdKubeClusterCreate, doctl.ArgSurgeUpgrade, "", true, "Enables surge-upgrade for the cluster") AddBoolFlag(cmdKubeClusterCreate, doctl.ArgHA, "", false, - "Creates the cluster with a highly-available control plane. Defaults to false. To enable the HA control plane, supply --ha=true.") + "Creates the cluster with a highly-available control plane. When omitted, API applies version-specific default (true for 1.36.0+; false for older). Use --ha to enable, --ha=false to disable.") AddBoolFlag(cmdKubeClusterCreate, doctl.ArgEnableControlPlaneFirewall, "", false, "Creates the cluster with control plane firewall enabled. Defaults to false. To enable the control plane firewall, supply --enable-control-plane-firewall=true.") AddStringSliceFlag(cmdKubeClusterCreate, doctl.ArgControlPlaneFirewallAllowedAddresses, "", nil, @@ -1706,7 +1706,7 @@ func buildClusterCreateRequestFromArgs(c *CmdConfig, r *godo.KubernetesClusterCr } r.SurgeUpgrade = surgeUpgrade - ha, err := c.Doit.GetBool(c.NS, doctl.ArgHA) + ha, err := c.Doit.GetBoolPtr(c.NS, doctl.ArgHA) if err != nil { return err } diff --git a/commands/kubernetes_test.go b/commands/kubernetes_test.go index a1d946c1e..ed475521a 100644 --- a/commands/kubernetes_test.go +++ b/commands/kubernetes_test.go @@ -526,7 +526,7 @@ func TestKubernetesCreate(t *testing.T) { Day: godo.KubernetesMaintenanceDayAny, }, AutoUpgrade: true, - HA: true, + HA: boolPtr(true), ControlPlaneFirewall: &godo.KubernetesControlPlaneFirewall{ Enabled: boolPtr(true), AllowedAddresses: []string{ @@ -571,7 +571,7 @@ func TestKubernetesCreate(t *testing.T) { ), }) config.Doit.Set(config.NS, doctl.ArgAutoUpgrade, testCluster.AutoUpgrade) - config.Doit.Set(config.NS, doctl.ArgHA, testCluster.HA) + 
config.Doit.Set(config.NS, doctl.ArgHA, true) config.Doit.Set(config.NS, doctl.ArgEnableControlPlaneFirewall, testCluster.ControlPlaneFirewall.Enabled) config.Doit.Set(config.NS, doctl.ArgControlPlaneFirewallAllowedAddresses, testCluster.ControlPlaneFirewall.AllowedAddresses) @@ -617,6 +617,43 @@ func TestKubernetesCreate(t *testing.T) { err = testK8sCmdService().RunKubernetesClusterCreate("c-8", 3)(config) assert.NoError(t, err) }) + + // Test HA omitted: when ArgHA is not set, the create request has HA: nil (API applies version-specific default). + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + clusterName := "ha-omit-cluster" + r := godo.KubernetesClusterCreateRequest{ + Name: clusterName, + RegionSlug: "sfo2", + VersionSlug: "1.13.0", + NodePools: []*godo.KubernetesNodePoolCreateRequest{ + { + Name: clusterName + "-default-pool", + Size: "s-1vcpu-2gb", + Count: 3, + }, + }, + MaintenancePolicy: &godo.KubernetesMaintenancePolicy{ + StartTime: "00:00", + Day: godo.KubernetesMaintenanceDayAny, + }, + AutoUpgrade: false, + SurgeUpgrade: true, + HA: nil, // omitted when --ha not passed + } + tm.kubernetes.EXPECT().Create(&r).Return(&testCluster, nil) + + config.Args = append(config.Args, clusterName) + config.Doit.Set(config.NS, doctl.ArgRegionSlug, "sfo2") + config.Doit.Set(config.NS, doctl.ArgClusterVersionSlug, "1.13.0") + config.Doit.Set(config.NS, doctl.ArgSizeSlug, "s-1vcpu-2gb") + config.Doit.Set(config.NS, doctl.ArgNodePoolCount, 3) + config.Doit.Set(config.NS, doctl.ArgMaintenanceWindow, "any=00:00") + config.Doit.Set(config.NS, doctl.ArgSurgeUpgrade, true) + // Do NOT set ArgHA - simulates user omitting --ha + + err := testK8sCmdService().RunKubernetesClusterCreate("s-1vcpu-2gb", 3)(config) + assert.NoError(t, err) + }) } func TestKubernetesUpdate(t *testing.T) { diff --git a/go.mod b/go.mod index 87b04caa6..ceb0e3181 100644 --- a/go.mod +++ b/go.mod @@ -132,3 +132,6 @@ require ( sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) + + // TODO(review): development-only override — remove before merge; a committed local-path replace breaks module resolution for all other consumers of this module + replace github.com/digitalocean/godo => ../godo diff --git a/integration/kubernetes_clusters_create_test.go b/integration/kubernetes_clusters_create_test.go index f83014d3b..513fb638f 100644 --- a/integration/kubernetes_clusters_create_test.go +++ b/integration/kubernetes_clusters_create_test.go @@ -51,6 +51,13 @@ var _ = suite("kubernetes/clusters/create", func(t *testing.T, when spec.G, it s if strings.Contains(string(reqBody), "some-node-pool-cluster") { matchedRequest = kubeNodePoolCreateJSONReq } + if strings.Contains(string(reqBody), "some-non-ha-cluster") { + matchedRequest = kubeClustersCreateNonHAJSONReq + } + // When --ha is omitted, request has no "ha" field; API applies version-specific default + if strings.Contains(string(reqBody), "some-cluster-name") && !strings.Contains(string(reqBody), `"ha"`) { + matchedRequest = kubeClustersCreateJSONReqOmitHA + } expect.JSONEq(string(reqBody), matchedRequest) @@ -116,6 +123,61 @@ var _ = suite("kubernetes/clusters/create", func(t *testing.T, when spec.G, it s expect.NoError(err, fmt.Sprintf("received error output: %s", output)) expect.Equal(strings.TrimSpace(fmt.Sprintf(kubeClustersCreateOutput, f.Name())), strings.TrimSpace(string(output))) }) + + it("creates a kube cluster omitting the ha field when --ha is not passed", func() { + f, err := os.CreateTemp(t.TempDir(), "fake-kube-config") + expect.NoError(err) + + err = f.Close() + expect.NoError(err) + + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "kubernetes", + "clusters", + "create", + "some-cluster-name", + "--region", "mars", + "--version", "some-kube-version", + "--1-clicks", "slug1", + ) + + cmd.Env = append(os.Environ(), + fmt.Sprintf("KUBECONFIG=%s", f.Name()), + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + 
expect.Equal(strings.TrimSpace(fmt.Sprintf(kubeClustersCreateOutput, f.Name())), strings.TrimSpace(string(output))) + }) + + it("creates a kube cluster with HA disabled when --ha=false", func() { + f, err := os.CreateTemp(t.TempDir(), "fake-kube-config") + expect.NoError(err) + + err = f.Close() + expect.NoError(err) + + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "kubernetes", + "clusters", + "create", + "some-non-ha-cluster", + "--region", "mars", + "--version", "some-kube-version", + "--ha=false", + ) + + cmd.Env = append(os.Environ(), + fmt.Sprintf("KUBECONFIG=%s", f.Name()), + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + }) }) when("using node-pool", func() { @@ -239,6 +301,49 @@ some-cluster-id some-cluster-name mars some-kube-version false } ] } +` + kubeClustersCreateJSONReqOmitHA = ` +{ + "name": "some-cluster-name", + "region": "mars", + "version": "some-kube-version", + "auto_upgrade": false, + "surge_upgrade": true, + "maintenance_policy": { + "day": "any", + "duration": "", + "start_time": "00:00" + }, + "node_pools": [ + { + "size": "s-1vcpu-2gb-intel", + "count": 3, + "name": "some-cluster-name-default-pool" + } + ] +} +` + kubeClustersCreateNonHAJSONReq = ` +{ + "name": "some-non-ha-cluster", + "region": "mars", + "version": "some-kube-version", + "auto_upgrade": false, + "surge_upgrade": true, + "ha": false, + "maintenance_policy": { + "day": "any", + "duration": "", + "start_time": "00:00" + }, + "node_pools": [ + { + "size": "s-1vcpu-2gb-intel", + "count": 3, + "name": "some-non-ha-cluster-default-pool" + } + ] +} ` kubeNodePoolCreateJSONReq = ` { diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 41fd1e2aa..bb7c8090c 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,14 @@ # Change Log +## 
[1.177.0] - 2026-03-11 + +- #959 - @blesswinsamuel - Add ListEvents, CancelEvent, and GetEventLogs APIs for App Platform +- #960 - @ZachEddy - apps: Add secure_header.remove_header to app spec definition + +## [1.176.0] - 2026-02-26 + +- #953 - @kamleshsahu - Add dbaas metrics client + ## [1.175.0] - 2026-02-12 - #952 - @bentranter - security: add cspm scanning functionality for public preview diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go index 148028cdf..bf18d3e2a 100644 --- a/vendor/github.com/digitalocean/godo/apps.gen.go +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -382,6 +382,8 @@ type AppSecureHeaderSpec struct { Key string `json:"key,omitempty"` // The value of the header to set. Value string `json:"value,omitempty"` + // Remove the header from incoming requests before forwarding to the app. + RemoveHeader bool `json:"remove_header,omitempty"` } // AppInstance struct for AppInstance @@ -756,6 +758,14 @@ type AutoscalerActionScaleChange struct { To int64 `json:"to,omitempty"` } +// AutoscalingEventComponentScaleChange struct for AutoscalingEventComponentScaleChange +type AutoscalingEventComponentScaleChange struct { + From int64 `json:"from,omitempty"` + To int64 `json:"to,omitempty"` + // The metric that triggered the scale change while scaling up. Known values are "cpu", "requests_per_second", "request_duration". For inactivity sleep, "scale_from_zero" and "scale_to_zero" are used. 
+ TriggeringMetric string `json:"triggering_metric,omitempty"` +} + // BitbucketSourceSpec struct for BitbucketSourceSpec type BitbucketSourceSpec struct { Repo string `json:"repo,omitempty"` @@ -1203,6 +1213,45 @@ type AppDomainValidation struct { TXTValue string `json:"txt_value,omitempty"` } +// Event struct for Event +type Event struct { + ID string `json:"id,omitempty"` + Type EventType `json:"type,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + DeploymentID string `json:"deployment_id,omitempty"` + Deployment *Deployment `json:"deployment,omitempty"` + Autoscaling *EventAutoscalingEvent `json:"autoscaling,omitempty"` +} + +// EventAutoscalingEvent struct for EventAutoscalingEvent +type EventAutoscalingEvent struct { + Phase EventAutoscalingEventPhase `json:"phase,omitempty"` + Components map[string]AutoscalingEventComponentScaleChange `json:"components,omitempty"` +} + +// EventAutoscalingEventPhase the model 'EventAutoscalingEventPhase' +type EventAutoscalingEventPhase string + +// List of EventAutoscalingEventPhase +const ( + EVENTAUTOSCALINGEVENTPHASE_Unknown EventAutoscalingEventPhase = "UNKNOWN" + EVENTAUTOSCALINGEVENTPHASE_Pending EventAutoscalingEventPhase = "PENDING" + EVENTAUTOSCALINGEVENTPHASE_InProgress EventAutoscalingEventPhase = "IN_PROGRESS" + EVENTAUTOSCALINGEVENTPHASE_Succeeded EventAutoscalingEventPhase = "SUCCEEDED" + EVENTAUTOSCALINGEVENTPHASE_Failed EventAutoscalingEventPhase = "FAILED" + EVENTAUTOSCALINGEVENTPHASE_Canceled EventAutoscalingEventPhase = "CANCELED" +) + +// EventType the model 'EventType' +type EventType string + +// List of EventType +const ( + EVENTTYPE_Unknown EventType = "UNKNOWN" + EVENTTYPE_Deployment EventType = "DEPLOYMENT" + EVENTTYPE_Autoscaling EventType = "AUTOSCALING" +) + // FunctionsComponentHealth struct for FunctionsComponentHealth type FunctionsComponentHealth struct { Name string `json:"name,omitempty"` diff --git a/vendor/github.com/digitalocean/godo/apps.go 
b/vendor/github.com/digitalocean/godo/apps.go index e1fa86d56..f35246155 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -24,6 +24,8 @@ const ( AppLogTypeRun AppLogType = "RUN" // AppLogTypeRunRestarted represents logs of crashed/restarted instances during runtime. AppLogTypeRunRestarted AppLogType = "RUN_RESTARTED" + // AppLogTypeAutoscaleEvent represents logs of an autoscaling event. + AppLogTypeAutoscaleEvent AppLogType = "AUTOSCALE_EVENT" ) // AppsService is an interface for interfacing with the App Platform endpoints @@ -81,6 +83,11 @@ type AppsService interface { GetJobInvocation(ctx context.Context, appID string, jobInvocationId string, opts *GetJobInvocationOptions) (*JobInvocation, *Response, error) GetJobInvocationLogs(ctx context.Context, appID, jobInvocationId string, opts *GetJobInvocationLogsOptions) (*AppLogs, *Response, error) CancelJobInvocation(ctx context.Context, appID, jobInvocationID string, opts *CancelJobInvocationOptions) (*JobInvocation, *Response, error) + + ListEvents(ctx context.Context, appID string, opts *ListEventsOptions) ([]*Event, *Response, error) + GetEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) + CancelEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) + GetEventLogs(ctx context.Context, appID, eventID string, opts *GetEventLogsOptions) (*AppLogs, *Response, error) } // AppLogs represent app logs. @@ -137,6 +144,22 @@ type CancelJobInvocationOptions struct { JobName string `url:"job_name,omitempty"` } +// ListEventsOptions specifies the optional parameters to the ListEvents method. +type ListEventsOptions struct { + Page int `url:"page,omitempty"` + PerPage int `url:"per_page,omitempty"` + // EventTypes filters events by type (e.g. DEPLOYMENT, AUTOSCALING). + EventTypes []string `url:"event_types,omitempty"` + // DeploymentTypes filters deployment events by deployment cause type. 
+ DeploymentTypes []string `url:"deployment_types,omitempty"` +} + +// GetEventLogsOptions specifies the optional parameters to the GetEventLogs method. +type GetEventLogsOptions struct { + Follow bool + TailLines int +} + // DeploymentCreateRequest represents a request to create a deployment. type DeploymentCreateRequest struct { ForceBuild bool `json:"force_build"` @@ -199,6 +222,16 @@ type jobInvocationsRoot struct { Meta *Meta `json:"meta"` } +type eventRoot struct { + Event *Event `json:"event,omitempty"` +} + +type eventsRoot struct { + Events []*Event `json:"events"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + type appTierRoot struct { Tier *AppTier `json:"tier"` } @@ -546,6 +579,96 @@ func (s *AppsServiceOp) CancelJobInvocation(ctx context.Context, appID string, j return root.JobInvocation, resp, nil } +// ListEvents lists all events for a given app. +func (s *AppsServiceOp) ListEvents(ctx context.Context, appID string, opts *ListEventsOptions) ([]*Event, *Response, error) { + path := fmt.Sprintf("%s/%s/events", appsBasePath, appID) + + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(eventsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + if l := root.Links; l != nil { + resp.Links = l + } + + if m := root.Meta; m != nil { + resp.Meta = m + } + return root.Events, resp, nil +} + +// GetEvent retrieves a single event for an app. 
+func (s *AppsServiceOp) GetEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) { + url := fmt.Sprintf("%s/%s/events/%s", appsBasePath, appID, eventID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, nil, err + } + + root := new(eventRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Event, resp, nil +} + +// CancelEvent cancels an in-progress autoscaling event. +func (s *AppsServiceOp) CancelEvent(ctx context.Context, appID, eventID string) (*Event, *Response, error) { + url := fmt.Sprintf("%s/%s/events/%s/cancel", appsBasePath, appID, eventID) + + req, err := s.client.NewRequest(ctx, http.MethodPost, url, nil) + if err != nil { + return nil, nil, err + } + + root := new(eventRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Event, resp, nil +} + +// GetEventLogs retrieves logs for an autoscaling event. +func (s *AppsServiceOp) GetEventLogs(ctx context.Context, appID, eventID string, opts *GetEventLogsOptions) (*AppLogs, *Response, error) { + url := fmt.Sprintf("%s/%s/events/%s/logs?type=%s", appsBasePath, appID, eventID, AppLogTypeAutoscaleEvent) + + if opts != nil { + if opts.Follow { + url += fmt.Sprintf("&follow=%t", opts.Follow) + } + if opts.TailLines > 0 { + url += fmt.Sprintf("&tail_lines=%d", opts.TailLines) + } + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, nil, err + } + + logs := new(AppLogs) + resp, err := s.client.Do(ctx, req, logs) + if err != nil { + return nil, resp, err + } + return logs, resp, nil +} + // GetLogs retrieves app logs. 
func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error) { var url string diff --git a/vendor/github.com/digitalocean/godo/apps_accessors.go b/vendor/github.com/digitalocean/godo/apps_accessors.go index ec5b7a304..d105e8a9c 100644 --- a/vendor/github.com/digitalocean/godo/apps_accessors.go +++ b/vendor/github.com/digitalocean/godo/apps_accessors.go @@ -2637,6 +2637,30 @@ func (a *AutoscalerActionScaleChange) GetTo() int64 { return a.To } +// GetFrom returns the From field. +func (a *AutoscalingEventComponentScaleChange) GetFrom() int64 { + if a == nil { + return 0 + } + return a.From +} + +// GetTo returns the To field. +func (a *AutoscalingEventComponentScaleChange) GetTo() int64 { + if a == nil { + return 0 + } + return a.To +} + +// GetTriggeringMetric returns the TriggeringMetric field. +func (a *AutoscalingEventComponentScaleChange) GetTriggeringMetric() string { + if a == nil { + return "" + } + return a.TriggeringMetric +} + // GetBranch returns the Branch field. func (b *BitbucketSourceSpec) GetBranch() string { if b == nil { @@ -3693,6 +3717,70 @@ func (d *DetectResponseServerlessPackage) GetName() string { return d.Name } +// GetAutoscaling returns the Autoscaling field. +func (e *Event) GetAutoscaling() *EventAutoscalingEvent { + if e == nil { + return nil + } + return e.Autoscaling +} + +// GetCreatedAt returns the CreatedAt field. +func (e *Event) GetCreatedAt() time.Time { + if e == nil { + return time.Time{} + } + return e.CreatedAt +} + +// GetDeployment returns the Deployment field. +func (e *Event) GetDeployment() *Deployment { + if e == nil { + return nil + } + return e.Deployment +} + +// GetDeploymentID returns the DeploymentID field. +func (e *Event) GetDeploymentID() string { + if e == nil { + return "" + } + return e.DeploymentID +} + +// GetID returns the ID field. 
+func (e *Event) GetID() string { + if e == nil { + return "" + } + return e.ID +} + +// GetType returns the Type field. +func (e *Event) GetType() EventType { + if e == nil { + return "" + } + return e.Type +} + +// GetComponents returns the Components field. +func (e *EventAutoscalingEvent) GetComponents() map[string]AutoscalingEventComponentScaleChange { + if e == nil { + return nil + } + return e.Components +} + +// GetPhase returns the Phase field. +func (e *EventAutoscalingEvent) GetPhase() EventAutoscalingEventPhase { + if e == nil { + return "" + } + return e.Phase +} + // GetFunctionsComponentHealthMetrics returns the FunctionsComponentHealthMetrics field. func (f *FunctionsComponentHealth) GetFunctionsComponentHealthMetrics() []*FunctionsComponentHealthMetrics { if f == nil { diff --git a/vendor/github.com/digitalocean/godo/dedicated_inference.go b/vendor/github.com/digitalocean/godo/dedicated_inference.go new file mode 100644 index 000000000..20bacb31e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/dedicated_inference.go @@ -0,0 +1,490 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const dedicatedInferenceBasePath = "/v2/dedicated-inferences" + +// DedicatedInferenceService is an interface for managing Dedicated Inference with the DigitalOcean API. 
+type DedicatedInferenceService interface { + Create(context.Context, *DedicatedInferenceCreateRequest) (*DedicatedInference, *DedicatedInferenceToken, *Response, error) + Get(context.Context, string) (*DedicatedInference, *Response, error) + List(context.Context, *DedicatedInferenceListOptions) ([]DedicatedInferenceListItem, *Response, error) + Delete(context.Context, string) (*Response, error) + Update(context.Context, string, *DedicatedInferenceUpdateRequest) (*DedicatedInference, *Response, error) + ListAccelerators(context.Context, string, *DedicatedInferenceListAcceleratorsOptions) ([]DedicatedInferenceAcceleratorInfo, *Response, error) + CreateToken(context.Context, string, *DedicatedInferenceTokenCreateRequest) (*DedicatedInferenceToken, *Response, error) + ListTokens(context.Context, string, *ListOptions) ([]DedicatedInferenceToken, *Response, error) + RevokeToken(context.Context, string, string) (*Response, error) + GetSizes(context.Context) (*DedicatedInferenceSizesResponse, *Response, error) + GetGPUModelConfig(context.Context) (*DedicatedInferenceGPUModelConfigResponse, *Response, error) +} + +// DedicatedInferenceServiceOp handles communication with Dedicated Inference methods of the DigitalOcean API. +type DedicatedInferenceServiceOp struct { + client *Client +} + +var _ DedicatedInferenceService = &DedicatedInferenceServiceOp{} + +// DedicatedInferenceCreateRequest represents a request to create a Dedicated Inference. +type DedicatedInferenceCreateRequest struct { + Spec *DedicatedInferenceSpecRequest `json:"spec"` + Secrets *DedicatedInferenceSecrets `json:"secrets,omitempty"` +} + +// DedicatedInferenceSpecRequest represents the deployment specification in a create/update request. 
+type DedicatedInferenceSpecRequest struct { + Version int `json:"version"` + Name string `json:"name"` + Region string `json:"region"` + EnablePublicEndpoint bool `json:"enable_public_endpoint"` + VPC *DedicatedInferenceVPCRequest `json:"vpc"` + ModelDeployments []*DedicatedInferenceModelRequest `json:"model_deployments"` +} + +// DedicatedInferenceVPCRequest represents the VPC configuration in a request. +type DedicatedInferenceVPCRequest struct { + UUID string `json:"uuid"` +} + +// DedicatedInferenceModelRequest represents a model deployment in a request. +type DedicatedInferenceModelRequest struct { + ModelID string `json:"model_id,omitempty"` + ModelSlug string `json:"model_slug"` + ModelProvider string `json:"model_provider"` + WorkloadConfig *DedicatedInferenceWorkloadConfig `json:"workload_config,omitempty"` + Accelerators []*DedicatedInferenceAcceleratorRequest `json:"accelerators"` +} + +// DedicatedInferenceWorkloadConfig represents workload-specific configuration. +type DedicatedInferenceWorkloadConfig struct{} + +// DedicatedInferenceAcceleratorRequest represents an accelerator in a request. +type DedicatedInferenceAcceleratorRequest struct { + AcceleratorSlug string `json:"accelerator_slug"` + Scale uint64 `json:"scale"` + Type string `json:"type"` +} + +// DedicatedInferenceSecrets represents secrets for external model providers. +type DedicatedInferenceSecrets struct { + HuggingFaceToken string `json:"hugging_face_token,omitempty"` +} + +// DedicatedInferenceListOptions specifies optional parameters for listing Dedicated Inferences. +type DedicatedInferenceListOptions struct { + Region string `url:"region,omitempty"` + Name string `url:"name,omitempty"` + ListOptions +} + +// DedicatedInferenceListAcceleratorsOptions specifies optional parameters for listing accelerators. 
+type DedicatedInferenceListAcceleratorsOptions struct { + Slug string `url:"slug,omitempty"` + ListOptions +} + +// DedicatedInferenceUpdateRequest represents a request to update a Dedicated Inference. +type DedicatedInferenceUpdateRequest struct { + Spec *DedicatedInferenceSpecRequest `json:"spec"` + Secrets *DedicatedInferenceSecrets `json:"secrets,omitempty"` +} + +// DedicatedInferenceTokenCreateRequest represents a request to create an auth token. +type DedicatedInferenceTokenCreateRequest struct { + Name string `json:"name"` +} + +// -- Response types (what the API returns) -- + +// DedicatedInferenceListItem represents a Dedicated Inference item in a list response. +type DedicatedInferenceListItem struct { + ID string `json:"id"` + Name string `json:"name"` + Region string `json:"region"` + Status string `json:"status"` + VPCUUID string `json:"vpc_uuid"` + Endpoints *DedicatedInferenceEndpoints `json:"endpoints,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// DedicatedInferenceAcceleratorInfo represents an accelerator in a list accelerators response. +type DedicatedInferenceAcceleratorInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` +} + +// DedicatedInference represents a Dedicated Inference resource returned by the API. 
+type DedicatedInference struct { + ID string `json:"id"` + Name string `json:"name"` + Region string `json:"region"` + Status string `json:"status"` + VPCUUID string `json:"vpc_uuid"` + Endpoints *DedicatedInferenceEndpoints `json:"endpoints,omitempty"` + DeploymentSpec *DedicatedInferenceDeployment `json:"spec,omitempty"` + PendingDeploymentSpec *DedicatedInferenceDeployment `json:"pending_deployment_spec,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +func (d DedicatedInference) String() string { + return Stringify(d) +} + +// DedicatedInferenceEndpoints represents the endpoints for a Dedicated Inference. +type DedicatedInferenceEndpoints struct { + PublicEndpointFQDN string `json:"public_endpoint_fqdn,omitempty"` + PrivateEndpointFQDN string `json:"private_endpoint_fqdn,omitempty"` +} + +// DedicatedInferenceDeployment represents a deployment spec in the API response. +type DedicatedInferenceDeployment struct { + Version uint64 `json:"version"` + ID string `json:"id"` + DedicatedInferenceID string `json:"dedicated_inference_id"` + State string `json:"state"` + EnablePublicEndpoint bool `json:"enable_public_endpoint"` + VPCConfig *DedicatedInferenceVPCConfig `json:"vpc_config,omitempty"` + ModelDeployments []*DedicatedInferenceModelDeployment `json:"model_deployments"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// DedicatedInferenceVPCConfig represents the VPC config in an API response. +type DedicatedInferenceVPCConfig struct { + VPCUUID string `json:"vpc_uuid"` +} + +// DedicatedInferenceModelDeployment represents a model deployment in an API response. 
+type DedicatedInferenceModelDeployment struct { + ModelID string `json:"model_id"` + ModelSlug string `json:"model_slug"` + ModelProvider string `json:"model_provider"` + Accelerators []*DedicatedInferenceAccelerator `json:"accelerators"` +} + +// DedicatedInferenceAccelerator represents an accelerator in an API response. +type DedicatedInferenceAccelerator struct { + AcceleratorID string `json:"accelerator_id"` + AcceleratorSlug string `json:"accelerator_slug"` + State string `json:"state"` + Type string `json:"type"` + Scale uint64 `json:"scale"` +} + +// DedicatedInferenceToken represents an auth token returned on create. +type DedicatedInferenceToken struct { + ID string `json:"id"` + Name string `json:"name"` + Value string `json:"value,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +func (t DedicatedInferenceToken) String() string { + return Stringify(t) +} + +// DedicatedInferenceSizesResponse represents the response from GetSizes. +type DedicatedInferenceSizesResponse struct { + EnabledRegions []string `json:"enabled_regions"` + Sizes []*DedicatedInferenceSize `json:"sizes"` +} + +// DedicatedInferenceSize represents a GPU size with pricing information. +type DedicatedInferenceSize struct { + GPUSlug string `json:"gpu_slug"` + PricePerHour string `json:"price_per_hour"` + Regions []string `json:"regions"` + Currency string `json:"currency"` + CPU uint32 `json:"cpu"` + Memory uint32 `json:"memory"` + GPU *DedicatedInferenceSizeGPU `json:"gpu"` + SizeCategory *DedicatedInferenceSizeCategory `json:"size_category"` + Disks []*DedicatedInferenceSizeDisk `json:"disks"` +} + +// DedicatedInferenceSizeGPU represents GPU details in a size. +type DedicatedInferenceSizeGPU struct { + Count uint32 `json:"count"` + VramGb uint32 `json:"vram_gb"` + Slug string `json:"slug"` +} + +// DedicatedInferenceSizeCategory represents the category of a size. 
+type DedicatedInferenceSizeCategory struct { + Name string `json:"name"` + FleetName string `json:"fleet_name"` +} + +// DedicatedInferenceSizeDisk represents a disk in a size. +type DedicatedInferenceSizeDisk struct { + Type string `json:"type"` + SizeGb uint64 `json:"size_gb"` +} + +// DedicatedInferenceGPUModelConfigResponse represents the response from GetGPUModelConfig. +type DedicatedInferenceGPUModelConfigResponse struct { + GPUModelConfigs []*DedicatedInferenceGPUModelConfig `json:"gpu_model_configs"` +} + +// DedicatedInferenceGPUModelConfig represents a GPU model configuration. +type DedicatedInferenceGPUModelConfig struct { + GPUSlugs []string `json:"gpu_slugs"` + ModelSlug string `json:"model_slug"` + ModelName string `json:"model_name"` + IsModelGated bool `json:"is_model_gated"` +} + +// -- Root types for JSON deserialization -- + +type dedicatedInferenceRoot struct { + DedicatedInference *DedicatedInference `json:"dedicated_inference"` + Token *DedicatedInferenceToken `json:"token,omitempty"` +} + +type dedicatedInferencesRoot struct { + DedicatedInferences []DedicatedInferenceListItem `json:"dedicated_inferences"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type dedicatedInferenceAcceleratorsRoot struct { + Accelerators []DedicatedInferenceAcceleratorInfo `json:"accelerators"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type dedicatedInferenceTokenRoot struct { + Token *DedicatedInferenceToken `json:"token"` +} + +type dedicatedInferenceTokensRoot struct { + Tokens []DedicatedInferenceToken `json:"tokens"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +// -- Service methods -- + +// Create a new Dedicated Inference with the given configuration. 
+func (s *DedicatedInferenceServiceOp) Create(ctx context.Context, createRequest *DedicatedInferenceCreateRequest) (*DedicatedInference, *DedicatedInferenceToken, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodPost, dedicatedInferenceBasePath, createRequest) + if err != nil { + return nil, nil, nil, err + } + + root := new(dedicatedInferenceRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, nil, resp, err + } + + return root.DedicatedInference, root.Token, resp, nil +} + +// Get an existing Dedicated Inference by its UUID. +func (s *DedicatedInferenceServiceOp) Get(ctx context.Context, id string) (*DedicatedInference, *Response, error) { + path := fmt.Sprintf("%s/%s", dedicatedInferenceBasePath, id) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.DedicatedInference, resp, nil +} + +// Delete an existing Dedicated Inference by its UUID. +func (s *DedicatedInferenceServiceOp) Delete(ctx context.Context, id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", dedicatedInferenceBasePath, id) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// Update an existing Dedicated Inference. 
+func (s *DedicatedInferenceServiceOp) Update(ctx context.Context, id string, updateRequest *DedicatedInferenceUpdateRequest) (*DedicatedInference, *Response, error) { + path := fmt.Sprintf("%s/%s", dedicatedInferenceBasePath, id) + + req, err := s.client.NewRequest(ctx, http.MethodPatch, path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.DedicatedInference, resp, nil +} + +// List all Dedicated Inferences. +func (s *DedicatedInferenceServiceOp) List(ctx context.Context, opt *DedicatedInferenceListOptions) ([]DedicatedInferenceListItem, *Response, error) { + path, err := addOptions(dedicatedInferenceBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferencesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.DedicatedInferences, resp, nil +} + +// ListAccelerators lists accelerators for a Dedicated Inference. 
+func (s *DedicatedInferenceServiceOp) ListAccelerators(ctx context.Context, diID string, opt *DedicatedInferenceListAcceleratorsOptions) ([]DedicatedInferenceAcceleratorInfo, *Response, error) { + basePath := fmt.Sprintf("%s/%s/accelerators", dedicatedInferenceBasePath, diID) + path, err := addOptions(basePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceAcceleratorsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.Accelerators, resp, nil +} + +// CreateToken creates a new auth token for a Dedicated Inference. +func (s *DedicatedInferenceServiceOp) CreateToken(ctx context.Context, diID string, createRequest *DedicatedInferenceTokenCreateRequest) (*DedicatedInferenceToken, *Response, error) { + path := fmt.Sprintf("%s/%s/tokens", dedicatedInferenceBasePath, diID) + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceTokenRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Token, resp, nil +} + +// ListTokens lists all auth tokens for a Dedicated Inference. 
+func (s *DedicatedInferenceServiceOp) ListTokens(ctx context.Context, diID string, opt *ListOptions) ([]DedicatedInferenceToken, *Response, error) { + basePath := fmt.Sprintf("%s/%s/tokens", dedicatedInferenceBasePath, diID) + path, err := addOptions(basePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dedicatedInferenceTokensRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.Tokens, resp, nil +} + +// RevokeToken revokes (deletes) an auth token for a Dedicated Inference. +func (s *DedicatedInferenceServiceOp) RevokeToken(ctx context.Context, diID string, tokenID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/tokens/%s", dedicatedInferenceBasePath, diID, tokenID) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// GetSizes returns available Dedicated Inference sizes and pricing. +func (s *DedicatedInferenceServiceOp) GetSizes(ctx context.Context) (*DedicatedInferenceSizesResponse, *Response, error) { + path := fmt.Sprintf("%s/sizes", dedicatedInferenceBasePath) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(DedicatedInferenceSizesResponse) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root, resp, nil +} + +// GetGPUModelConfig returns supported GPU model configurations. 
+func (s *DedicatedInferenceServiceOp) GetGPUModelConfig(ctx context.Context) (*DedicatedInferenceGPUModelConfigResponse, *Response, error) { + path := fmt.Sprintf("%s/gpu-model-config", dedicatedInferenceBasePath) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(DedicatedInferenceGPUModelConfigResponse) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index c3fbd9d31..eb14dda0a 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.175.0" + libraryVersion = "1.177.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -100,6 +100,7 @@ type Client struct { VPCs VPCsService PartnerAttachment PartnerAttachmentService GradientAI GradientAIService + DedicatedInference DedicatedInferenceService BYOIPPrefixes BYOIPPrefixesService // Optional function called after every successful request made to the DO APIs onRequestCompleted RequestCompletionCallback @@ -331,6 +332,7 @@ func NewClient(httpClient *http.Client) *Client { c.VPCs = &VPCsServiceOp{client: c} c.PartnerAttachment = &PartnerAttachmentServiceOp{client: c} c.GradientAI = &GradientAIServiceOp{client: c} + c.DedicatedInference = &DedicatedInferenceServiceOp{client: c} c.headers = make(map[string]string) diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index f68291d58..15a8321a9 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -76,8 +76,9 @@ type KubernetesClusterCreateRequest struct { ClusterSubnet string `json:"cluster_subnet,omitempty"` ServiceSubnet 
 	string                       `json:"service_subnet,omitempty"`
 
-	// Create cluster with highly available control plane
-	HA bool `json:"ha"`
+	// HA enables a highly available control plane. When omitted, the API applies
+	// version-based defaults: false for versions below 1.36.0, true for 1.36.0 and later.
+	HA *bool `json:"ha,omitempty"`
 
 	NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"`
 
@@ -91,6 +92,7 @@ type KubernetesClusterCreateRequest struct {
 	AmdGpuDeviceMetricsExporterPlugin *KubernetesAmdGpuDeviceMetricsExporterPlugin `json:"amd_gpu_device_metrics_exporter_plugin,omitempty"`
 	NvidiaGpuDevicePlugin *KubernetesNvidiaGpuDevicePlugin `json:"nvidia_gpu_device_plugin,omitempty"`
 	RdmaSharedDevicePlugin *KubernetesRdmaSharedDevicePlugin `json:"rdma_shared_dev_plugin,omitempty"`
+	SSO *KubernetesClusterSSO `json:"sso,omitempty"`
 }
 
 // KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster.
@@ -107,6 +109,7 @@ type KubernetesClusterUpdateRequest struct {
 	AmdGpuDeviceMetricsExporterPlugin *KubernetesAmdGpuDeviceMetricsExporterPlugin `json:"amd_gpu_device_metrics_exporter_plugin,omitempty"`
 	NvidiaGpuDevicePlugin *KubernetesNvidiaGpuDevicePlugin `json:"nvidia_gpu_device_plugin,omitempty"`
 	RdmaSharedDevicePlugin *KubernetesRdmaSharedDevicePlugin `json:"rdma_shared_dev_plugin,omitempty"`
+	SSO *KubernetesClusterSSO `json:"sso,omitempty"`
 
 	// Convert cluster to run highly available control plane
 	HA *bool `json:"ha,omitempty"`
@@ -244,6 +247,7 @@ type KubernetesCluster struct {
 	AmdGpuDeviceMetricsExporterPlugin *KubernetesAmdGpuDeviceMetricsExporterPlugin `json:"amd_gpu_device_metrics_exporter_plugin,omitempty"`
 	NvidiaGpuDevicePlugin *KubernetesNvidiaGpuDevicePlugin `json:"nvidia_gpu_device_plugin,omitempty"`
 	RdmaSharedDevicePlugin *KubernetesRdmaSharedDevicePlugin `json:"rdma_shared_dev_plugin,omitempty"`
+	SSO *KubernetesClusterSSO `json:"sso,omitempty"`
 	Status *KubernetesClusterStatus `json:"status,omitempty"`
 	CreatedAt time.Time 
`json:"created_at,omitempty"` @@ -321,6 +325,14 @@ type KubernetesRdmaSharedDevicePlugin struct { Enabled *bool `json:"enabled"` } +// KubernetesClusterSSO configures Single Sign-On (SSO) for a Kubernetes cluster. +// Identity Provider (IDP) settings for SSO are set up on the team level, +// whereas on a per-cluster level, users can enable or require SSO for the cluster. +type KubernetesClusterSSO struct { + Enabled *bool `json:"enabled,omitempty"` + Required *bool `json:"required,omitempty"` +} + // KubernetesMaintenancePolicyDay represents the possible days of a maintenance // window type KubernetesMaintenancePolicyDay int diff --git a/vendor/github.com/digitalocean/godo/monitoring.go b/vendor/github.com/digitalocean/godo/monitoring.go index 00feb2565..e4784fecf 100644 --- a/vendor/github.com/digitalocean/godo/monitoring.go +++ b/vendor/github.com/digitalocean/godo/monitoring.go @@ -14,6 +14,7 @@ const ( alertPolicyBasePath = monitoringBasePath + "/alerts" dropletMetricsBasePath = monitoringBasePath + "/metrics/droplet" loadBalancerMetricsBasePath = monitoringBasePath + "/metrics/load_balancer" + dbaasMysqlMetricsBasePath = monitoringBasePath + "/metrics/database/mysql" DropletCPUUtilizationPercent = "v1/insights/droplet/cpu" DropletMemoryUtilizationPercent = "v1/insights/droplet/memory_utilization_percent" @@ -96,6 +97,21 @@ type MonitoringService interface { GetLoadBalancerDropletsConnections(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) GetLoadBalancerDropletsHealthChecks(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) GetLoadBalancerDropletsDowntime(ctx context.Context, args *LoadBalancerMetricsRequest) (*MetricsResponse, *Response, error) + + // DBaaS MySQL metrics (host-level: db_id only) + GetDbaasMysqlCpuUsage(ctx context.Context, args *DbaasMysqlCpuUsageRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlLoad(ctx context.Context, args 
*DbaasMysqlLoadRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlMemoryUsage(ctx context.Context, args *DbaasMysqlMemoryUsageRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlDiskUsage(ctx context.Context, args *DbaasMysqlDiskUsageRequest) (*MetricsResponse, *Response, error) + // DBaaS MySQL metrics (service-level: db_id + service) + GetDbaasMysqlThreadsConnected(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlThreadsCreatedRate(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlThreadsActive(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlIndexVsSequentialReads(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlOpRates(ctx context.Context, args *DbaasMysqlOpRatesRequest) (*MetricsResponse, *Response, error) + // DBaaS MySQL metrics (schema-level: db_id + service + schema) + GetDbaasMysqlSchemaThroughput(ctx context.Context, args *DbaasMysqlSchemaThroughputRequest) (*MetricsResponse, *Response, error) + GetDbaasMysqlSchemaLatency(ctx context.Context, args *DbaasMysqlSchemaLatencyRequest) (*MetricsResponse, *Response, error) } // MonitoringServiceOp handles communication with monitoring related methods of the @@ -199,6 +215,73 @@ type LoadBalancerMetricsRequest struct { End time.Time } +// DbaasMysqlMetricsRequest holds the information needed to retrieve DBaaS MySQL host-level metrics (db_id only). +type DbaasMysqlMetricsRequest struct { + DBID string + Start time.Time + End time.Time +} + +// DbaasMysqlCpuUsageRequest holds the information needed to retrieve MySQL cluster CPU usage (percent). Aggregate: avg, max, min. 
+type DbaasMysqlCpuUsageRequest struct { + DbaasMysqlMetricsRequest + Aggregate string // avg, max, min +} + +// DbaasMysqlLoadRequest holds the information needed to retrieve MySQL cluster load average. Metric: load1, load5, load15. Aggregate: avg, max. +type DbaasMysqlLoadRequest struct { + DbaasMysqlMetricsRequest + Metric string // load1, load5, load15 + Aggregate string // avg, max +} + +// DbaasMysqlMemoryUsageRequest holds the information needed to retrieve MySQL cluster memory usage (percent). Aggregate: avg, max, min. +type DbaasMysqlMemoryUsageRequest struct { + DbaasMysqlMetricsRequest + Aggregate string // avg, max, min +} + +// DbaasMysqlDiskUsageRequest holds the information needed to retrieve MySQL cluster disk usage (percent). Aggregate: avg, max, min. +type DbaasMysqlDiskUsageRequest struct { + DbaasMysqlMetricsRequest + Aggregate string // avg, max, min +} + +// DbaasMysqlServiceMetricsRequest holds the information needed to retrieve DBaaS MySQL service-level metrics (db_id + service). +type DbaasMysqlServiceMetricsRequest struct { + DBID string + Service string + Start time.Time + End time.Time +} + +// DbaasMysqlOpRatesRequest holds the information needed to retrieve MySQL service operations rate. Metric: select, insert, update, delete. +type DbaasMysqlOpRatesRequest struct { + DbaasMysqlServiceMetricsRequest + Metric string // select, insert, update, delete +} + +// DbaasMysqlSchemaMetricsRequest holds the information needed to retrieve DBaaS MySQL schema-level metrics (db_id + service + schema). +type DbaasMysqlSchemaMetricsRequest struct { + DBID string + Service string + Schema string + Start time.Time + End time.Time +} + +// DbaasMysqlSchemaThroughputRequest holds the information needed to retrieve MySQL schema table I/O throughput (rows/s). Metric: insert, fetch, update, delete. 
+type DbaasMysqlSchemaThroughputRequest struct { + DbaasMysqlSchemaMetricsRequest + Metric string // insert, fetch, update, delete +} + +// DbaasMysqlSchemaLatencyRequest holds the information needed to retrieve MySQL schema table I/O latency (seconds). Metric: insert, fetch, update, delete. +type DbaasMysqlSchemaLatencyRequest struct { + DbaasMysqlSchemaMetricsRequest + Metric string // insert, fetch, update, delete +} + // MetricsResponse holds a Metrics query response. type MetricsResponse struct { Status string `json:"status"` @@ -562,3 +645,147 @@ func (s *MonitoringServiceOp) getLoadBalancerMetrics(ctx context.Context, path s return root, resp, err } + +// getDbaasMysqlMetrics performs a GET request for a DBaaS MySQL metric path with the given query params. +func (s *MonitoringServiceOp) getDbaasMysqlMetrics(ctx context.Context, path string, params map[string]string) (*MetricsResponse, *Response, error) { + fullPath := dbaasMysqlMetricsBasePath + path + req, err := s.client.NewRequest(ctx, http.MethodGet, fullPath, nil) + if err != nil { + return nil, nil, err + } + q := req.URL.Query() + for k, v := range params { + q.Add(k, v) + } + req.URL.RawQuery = q.Encode() + root := new(MetricsResponse) + resp, err := s.client.Do(ctx, req, root) + return root, resp, err +} + +// GetDbaasMysqlCpuUsage retrieves CPU usage (percent) for a MySQL cluster. Aggregate: avg, max, min. +func (s *MonitoringServiceOp) GetDbaasMysqlCpuUsage(ctx context.Context, args *DbaasMysqlCpuUsageRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/cpu_usage", params) +} + +// GetDbaasMysqlLoad retrieves load average for a MySQL cluster. Metric: load1, load5, load15. Aggregate: avg, max. 
+func (s *MonitoringServiceOp) GetDbaasMysqlLoad(ctx context.Context, args *DbaasMysqlLoadRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "metric": args.Metric, + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/load", params) +} + +// GetDbaasMysqlMemoryUsage retrieves memory usage (percent) for a MySQL cluster. Aggregate: avg, max, min. +func (s *MonitoringServiceOp) GetDbaasMysqlMemoryUsage(ctx context.Context, args *DbaasMysqlMemoryUsageRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/memory_usage", params) +} + +// GetDbaasMysqlDiskUsage retrieves disk usage (percent) for a MySQL cluster. Aggregate: avg, max, min. +func (s *MonitoringServiceOp) GetDbaasMysqlDiskUsage(ctx context.Context, args *DbaasMysqlDiskUsageRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + "aggregate": args.Aggregate, + } + return s.getDbaasMysqlMetrics(ctx, "/disk_usage", params) +} + +// GetDbaasMysqlThreadsConnected retrieves current threads connected for a MySQL service. 
+func (s *MonitoringServiceOp) GetDbaasMysqlThreadsConnected(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/threads_connected", params) +} + +// GetDbaasMysqlThreadsCreatedRate retrieves threads created rate (per second) for a MySQL service. +func (s *MonitoringServiceOp) GetDbaasMysqlThreadsCreatedRate(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/threads_created_rate", params) +} + +// GetDbaasMysqlThreadsActive retrieves active (running) threads for a MySQL service. +func (s *MonitoringServiceOp) GetDbaasMysqlThreadsActive(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/threads_active", params) +} + +// GetDbaasMysqlIndexVsSequentialReads retrieves index vs sequential reads ratio (percent) for a MySQL service. 
+func (s *MonitoringServiceOp) GetDbaasMysqlIndexVsSequentialReads(ctx context.Context, args *DbaasMysqlServiceMetricsRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/index_vs_sequential_reads", params) +} + +// GetDbaasMysqlOpRates retrieves operations rate (select, insert, update, delete per second) for a MySQL service. +func (s *MonitoringServiceOp) GetDbaasMysqlOpRates(ctx context.Context, args *DbaasMysqlOpRatesRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "metric": args.Metric, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/op_rates", params) +} + +// GetDbaasMysqlSchemaThroughput retrieves table I/O throughput (rows/s) for a MySQL schema. Metric: insert, fetch, update, delete. +func (s *MonitoringServiceOp) GetDbaasMysqlSchemaThroughput(ctx context.Context, args *DbaasMysqlSchemaThroughputRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "schema": args.Schema, + "metric": args.Metric, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/schema_throughput", params) +} + +// GetDbaasMysqlSchemaLatency retrieves table I/O latency (seconds) for a MySQL schema. Metric: insert, fetch, update, delete. 
+func (s *MonitoringServiceOp) GetDbaasMysqlSchemaLatency(ctx context.Context, args *DbaasMysqlSchemaLatencyRequest) (*MetricsResponse, *Response, error) { + params := map[string]string{ + "db_id": args.DBID, + "service": args.Service, + "schema": args.Schema, + "metric": args.Metric, + "start": fmt.Sprintf("%d", args.Start.Unix()), + "end": fmt.Sprintf("%d", args.End.Unix()), + } + return s.getDbaasMysqlMetrics(ctx, "/schema_latency", params) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2cd3ef0d7..1291ed421 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -61,7 +61,7 @@ github.com/creack/pty # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.175.0 +# github.com/digitalocean/godo v1.175.0 => ../godo ## explicit; go 1.23.0 github.com/digitalocean/godo github.com/digitalocean/godo/metrics @@ -630,3 +630,4 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml +# github.com/digitalocean/godo => ../godo