diff --git a/.golangci.yml b/.golangci.yml index 4b406adb2ce1a9..5eaa0f1448a61f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -19,6 +19,7 @@ linters: - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - ineffassign # Detects when assignments to existing variables are not used - misspell # Finds commonly misspelled English words in comments + - perfsprint # replace uses of fmt.Sprintf and fmt.Errorf with better (both in CPU and memory) alternatives - pkgconfigusage # Linter for checking usage of pkgconfig inside components folder - revive # Revive is a replacement for golint, a coding style checker - staticcheck # staticcheck is a go vet on steroids, applying a ton of static analysis checks diff --git a/cmd/agent/common/import.go b/cmd/agent/common/import.go index 1b53565fd975f8..d1fc463705bbbc 100644 --- a/cmd/agent/common/import.go +++ b/cmd/agent/common/import.go @@ -8,6 +8,7 @@ package common import ( + "errors" "fmt" "os" "os/user" @@ -230,7 +231,7 @@ func copyFile(src, dst string, overwrite bool, transformations []TransformationF return fmt.Errorf("unable to create a backup copy of the destination file: %v", err) } } else { - return fmt.Errorf("destination file already exists, run the command again with --force or -f to overwrite it") + return errors.New("destination file already exists, run the command again with --force or -f to overwrite it") } } diff --git a/cmd/agent/subcommands/analyzelogs/command.go b/cmd/agent/subcommands/analyzelogs/command.go index 665575c9229b6f..a94db222f9b720 100644 --- a/cmd/agent/subcommands/analyzelogs/command.go +++ b/cmd/agent/subcommands/analyzelogs/command.go @@ -9,6 +9,7 @@ package analyzelogs import ( "context" "encoding/json" + "errors" "fmt" "os" "time" @@ -72,7 +73,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { Long: `Run a Datadog agent logs configuration and print the results to stdout`, RunE: 
func(_ *cobra.Command, args []string) error { if len(args) < 1 { - return fmt.Errorf("log config file path is required") + return errors.New("log config file path is required") } cliParams.LogConfigPath = args[0] return fxutil.OneShot(runAnalyzeLogs, @@ -214,5 +215,5 @@ func resolveCheckConfig(ac autodiscovery.Component, cliParams *CliParams) ([]*so } return sources, nil } - return nil, fmt.Errorf("Cannot get source") + return nil, errors.New("Cannot get source") } diff --git a/cmd/agent/subcommands/dogstatsdcapture/command.go b/cmd/agent/subcommands/dogstatsdcapture/command.go index e7b1d337737ae5..afe165d876b641 100644 --- a/cmd/agent/subcommands/dogstatsdcapture/command.go +++ b/cmd/agent/subcommands/dogstatsdcapture/command.go @@ -85,7 +85,7 @@ func dogstatsdCapture(_ log.Component, config config.Component, cliParams *cliPa defer cancel() md := metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", ipc.GetAuthToken())}, // TODO IPC: replace with GRPC Client + "authorization": []string{"Bearer " + ipc.GetAuthToken()}, // TODO IPC: replace with GRPC Client } ctx = metadata.NewOutgoingContext(ctx, md) diff --git a/cmd/agent/subcommands/dogstatsdreplay/command.go b/cmd/agent/subcommands/dogstatsdreplay/command.go index 34cffcab47a8a5..0c5a43a2a8b5a7 100644 --- a/cmd/agent/subcommands/dogstatsdreplay/command.go +++ b/cmd/agent/subcommands/dogstatsdreplay/command.go @@ -8,6 +8,7 @@ package dogstatsdreplay import ( "context" + "errors" "fmt" "net" "os" @@ -95,7 +96,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar fmt.Printf("Replaying dogstatsd traffic...\n\n") md := metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", ipc.GetAuthToken())}, + "authorization": []string{"Bearer " + ipc.GetAuthToken()}, } ctx = metadata.NewOutgoingContext(ctx, md) @@ -123,7 +124,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar s := pkgconfigsetup.Datadog().GetString("dogstatsd_socket") if s 
== "" { - return fmt.Errorf("Dogstatsd UNIX socket disabled") + return errors.New("Dogstatsd UNIX socket disabled") } addr, err := net.ResolveUnixAddr("unixgram", s) diff --git a/cmd/agent/subcommands/import/command.go b/cmd/agent/subcommands/import/command.go index 09fc4209156c39..d5674c40de89e1 100644 --- a/cmd/agent/subcommands/import/command.go +++ b/cmd/agent/subcommands/import/command.go @@ -7,6 +7,7 @@ package cmdimport import ( + "errors" "fmt" "os" @@ -56,7 +57,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { func importCmd(cliParams *cliParams) error { if len(cliParams.args) != 2 { - return fmt.Errorf("please provide all the required arguments") + return errors.New("please provide all the required arguments") } if cliParams.ConfFilePath != "" { diff --git a/cmd/agent/subcommands/integrations/command.go b/cmd/agent/subcommands/integrations/command.go index d7b618c08daaa8..1faff0e9bd6556 100644 --- a/cmd/agent/subcommands/integrations/command.go +++ b/cmd/agent/subcommands/integrations/command.go @@ -131,7 +131,7 @@ You must specify a version of the package to install using the syntax: }, } installCmd.Flags().BoolVarP( - &cliParams.localWheel, "local-wheel", "w", false, fmt.Sprintf("install an agent check from a locally available wheel file. %s", disclaimer), + &cliParams.localWheel, "local-wheel", "w", false, "install an agent check from a locally available wheel file. 
"+disclaimer, ) installCmd.Flags().BoolVarP( &cliParams.thirdParty, "third-party", "t", false, "install a community or vendor-contributed integration", @@ -288,9 +288,9 @@ func getCommandPython(useSysPython bool) (string, error) { func validateArgs(args []string, local bool) error { if len(args) > 1 { - return fmt.Errorf("Too many arguments") + return errors.New("Too many arguments") } else if len(args) == 0 { - return fmt.Errorf("Missing package argument") + return errors.New("Missing package argument") } if !local { @@ -323,7 +323,7 @@ func pip(cliParams *cliParams, args []string, stdout io.Writer, stderr io.Writer args = append([]string{"-mpip"}, cmd) if cliParams.verbose > 0 { - args = append(args, fmt.Sprintf("-%s", strings.Repeat("v", cliParams.verbose))) + args = append(args, "-"+strings.Repeat("v", cliParams.verbose)) } // Append implicit flags to the *pip* command @@ -420,22 +420,20 @@ func install(cliParams *cliParams, _ log.Component) error { return fmt.Errorf("Some errors prevented moving %s configuration files: %v", integration, err) } - fmt.Println(color.GreenString(fmt.Sprintf( - "Successfully completed the installation of %s", integration, - ))) + fmt.Println(color.GreenString("Successfully completed the installation of " + integration)) return nil } // Additional verification for installation if len(strings.Split(cliParams.args[0], "==")) != 2 { - return fmt.Errorf("you must specify a version to install with ==") + return errors.New("you must specify a version to install with ==") } intVer := strings.Split(cliParams.args[0], "==") integration := normalizePackageName(strings.TrimSpace(intVer[0])) if integration == "datadog-checks-base" { - return fmt.Errorf("this command does not allow installing datadog-checks-base") + return errors.New("this command does not allow installing datadog-checks-base") } versionToInstall, err := semver.NewVersion(strings.TrimSpace(intVer[1])) if err != nil || versionToInstall == nil { @@ -517,7 +515,7 @@ func 
downloadWheel(cliParams *cliParams, integration, version, rootLayoutType st "--type", rootLayoutType, } if cliParams.verbose > 0 { - args = append(args, fmt.Sprintf("-%s", strings.Repeat("v", cliParams.verbose))) + args = append(args, "-"+strings.Repeat("v", cliParams.verbose)) } if cliParams.unsafeDisableVerification { @@ -554,9 +552,9 @@ func downloadWheel(cliParams *cliParams, integration, version, rootLayoutType st proxies := pkgconfigsetup.Datadog().GetProxies() if proxies != nil { downloaderCmd.Env = append(downloaderCmd.Env, - fmt.Sprintf("HTTP_PROXY=%s", proxies.HTTP), - fmt.Sprintf("HTTPS_PROXY=%s", proxies.HTTPS), - fmt.Sprintf("NO_PROXY=%s", strings.Join(proxies.NoProxy, ",")), + "HTTP_PROXY="+proxies.HTTP, + "HTTPS_PROXY="+proxies.HTTPS, + "NO_PROXY="+strings.Join(proxies.NoProxy, ","), ) } @@ -790,7 +788,7 @@ func getVersionFromReqLine(integration string, lines string) (*semver.Version, b func moveConfigurationFilesOf(cliParams *cliParams, integration string) error { confFolder := pkgconfigsetup.Datadog().GetString("confd_path") check := getIntegrationName(integration) - confFileDest := filepath.Join(confFolder, fmt.Sprintf("%s.d", check)) + confFileDest := filepath.Join(confFolder, check+".d") if err := os.MkdirAll(confFileDest, os.ModeDir|0755); err != nil { return err } @@ -845,9 +843,7 @@ func moveConfigurationFiles(srcFolder string, dstFolder string) error { errorMsg = fmt.Sprintf("%s\nError writing configuration file %s: %v", errorMsg, dst, err) continue } - fmt.Println(color.GreenString(fmt.Sprintf( - "Successfully copied configuration file %s", filename, - ))) + fmt.Println(color.GreenString("Successfully copied configuration file " + filename)) } if errorMsg != "" { return errors.New(errorMsg) diff --git a/cmd/agent/subcommands/integrations/integrations_darwin.go b/cmd/agent/subcommands/integrations/integrations_darwin.go index b552738fc47974..c203ea5facd612 100644 --- a/cmd/agent/subcommands/integrations/integrations_darwin.go +++ 
b/cmd/agent/subcommands/integrations/integrations_darwin.go @@ -8,13 +8,13 @@ package integrations import ( - "fmt" + "errors" "os" ) func validateUser(_ bool) error { if os.Geteuid() != 0 { - return fmt.Errorf("please run this tool with the root user") + return errors.New("please run this tool with the root user") } return nil } diff --git a/cmd/agent/subcommands/integrations/integrations_nix.go b/cmd/agent/subcommands/integrations/integrations_nix.go index 416e5ead25bf31..48f59308c9989c 100644 --- a/cmd/agent/subcommands/integrations/integrations_nix.go +++ b/cmd/agent/subcommands/integrations/integrations_nix.go @@ -8,13 +8,13 @@ package integrations import ( - "fmt" + "errors" "os" ) func validateUser(allowRoot bool) error { if os.Geteuid() == 0 && !allowRoot { - return fmt.Errorf("operation is disabled for root user. Please run this tool with the agent-running user or add '--allow-root/-r' to force") + return errors.New("operation is disabled for root user. Please run this tool with the agent-running user or add '--allow-root/-r' to force") } return nil } diff --git a/cmd/agent/subcommands/integrations/integrations_nix_helpers.go b/cmd/agent/subcommands/integrations/integrations_nix_helpers.go index de34a15fff0cd6..365ae4475ae14d 100644 --- a/cmd/agent/subcommands/integrations/integrations_nix_helpers.go +++ b/cmd/agent/subcommands/integrations/integrations_nix_helpers.go @@ -24,7 +24,7 @@ var ( ) func getRelPyPath() string { - return filepath.Join("embedded", "bin", fmt.Sprintf("%s3", pythonBin)) + return filepath.Join("embedded", "bin", pythonBin+"3") } func getRelChecksPath(cliParams *cliParams) (string, error) { diff --git a/cmd/agent/subcommands/launchgui/command.go b/cmd/agent/subcommands/launchgui/command.go index a12fe52cc2b8e3..c8e6128fa6df30 100644 --- a/cmd/agent/subcommands/launchgui/command.go +++ b/cmd/agent/subcommands/launchgui/command.go @@ -7,6 +7,7 @@ package launchgui import ( + "errors" "fmt" "net" @@ -56,7 +57,7 @@ func 
Commands(globalParams *command.GlobalParams) []*cobra.Command { func launchGui(config config.Component, _ *cliParams, _ log.Component, client ipc.HTTPClient) error { guiPort := config.GetString("GUI_port") if guiPort == "-1" { - return fmt.Errorf("GUI not enabled: to enable, please set an appropriate port in your datadog.yaml file") + return errors.New("GUI not enabled: to enable, please set an appropriate port in your datadog.yaml file") } // 'http://localhost' is preferred over 'http://127.0.0.1' due to Internet Explorer behavior. diff --git a/cmd/agent/subcommands/remoteconfig/command.go b/cmd/agent/subcommands/remoteconfig/command.go index 1e90322040e827..f26e2f7f2b7e01 100644 --- a/cmd/agent/subcommands/remoteconfig/command.go +++ b/cmd/agent/subcommands/remoteconfig/command.go @@ -94,7 +94,7 @@ func reset(_ *cliParams, config config.Component, ipc ipc.Component) error { ctx, closeFn := context.WithCancel(context.Background()) defer closeFn() md := metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", ipc.GetAuthToken())}, + "authorization": []string{"Bearer " + ipc.GetAuthToken()}, } ctx = metadata.NewOutgoingContext(ctx, md) @@ -126,7 +126,7 @@ func state(_ *cliParams, config config.Component, ipc ipc.Component) error { ctx, closeFn := context.WithCancel(context.Background()) defer closeFn() md := metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", ipc.GetAuthToken())}, // TODO IPC: use GRPC client + "authorization": []string{"Bearer " + ipc.GetAuthToken()}, // TODO IPC: use GRPC client } ctx = metadata.NewOutgoingContext(ctx, md) diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 84d62291442028..8a341057e928e7 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -8,8 +8,8 @@ package run import ( "context" + "errors" _ "expvar" // Blank import used because this isn't directly used in this file - "fmt" "net/http" _ "net/http/pprof" // Blank import 
used because this isn't directly used in this file "os" @@ -321,7 +321,7 @@ func run(log log.Component, stopCh <- nil case <-signals.ErrorStopper: _ = log.Critical("The Agent has encountered an error, shutting down...") - stopCh <- fmt.Errorf("shutting down because of an error") + stopCh <- errors.New("shutting down because of an error") case sig := <-signalCh: log.Infof("Received signal '%s', shutting down...", sig) stopCh <- nil diff --git a/cmd/agent/subcommands/status/command.go b/cmd/agent/subcommands/status/command.go index 58cf961b385793..55bd37b12b006b 100644 --- a/cmd/agent/subcommands/status/command.go +++ b/cmd/agent/subcommands/status/command.go @@ -192,7 +192,7 @@ func requestStatus(cliParams *cliParams, client ipc.HTTPClient) error { func componentStatusCmd(_ log.Component, cliParams *cliParams, client ipc.HTTPClient) error { if len(cliParams.args) > 1 { - return fmt.Errorf("only one section must be specified") + return errors.New("only one section must be specified") } return redactError(componentStatus(cliParams, cliParams.args[0], client)) diff --git a/cmd/agent/subcommands/streamlogs/command.go b/cmd/agent/subcommands/streamlogs/command.go index 503c40550de33b..c2b5ed3a9796e7 100644 --- a/cmd/agent/subcommands/streamlogs/command.go +++ b/cmd/agent/subcommands/streamlogs/command.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io" "os" @@ -79,7 +80,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { // PreRunE is used to validate duration before stream-logs is run. 
cmd.PreRunE = func(_ *cobra.Command, _ []string) error { if cliParams.Duration < 0 { - return fmt.Errorf("duration must be a positive value") + return errors.New("duration must be a positive value") } return nil } diff --git a/cmd/cluster-agent-cloudfoundry/command/command.go b/cmd/cluster-agent-cloudfoundry/command/command.go index f9fc68959a6ab1..5793cd67d3dc17 100644 --- a/cmd/cluster-agent-cloudfoundry/command/command.go +++ b/cmd/cluster-agent-cloudfoundry/command/command.go @@ -9,7 +9,6 @@ package command import ( - "fmt" "os" "github.com/fatih/color" @@ -47,7 +46,7 @@ func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { // AgentCmd is the root command agentCmd := &cobra.Command{ - Use: fmt.Sprintf("%s [command]", os.Args[0]), + Use: os.Args[0] + " [command]", Short: "Datadog Cluster Agent for Cloud Foundry at your service.", Long: ` Datadog Cluster Agent for Cloud Foundry takes care of running checks that need to run only diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index b7a213044388ee..2aefd876b77ec8 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -10,6 +10,7 @@ package run import ( "context" + "errors" "fmt" "os" "os/signal" @@ -202,7 +203,7 @@ func run( } pkglog.Infof("Hostname is: %s", hname) - demultiplexer.AddAgentStartupTelemetry(fmt.Sprintf("%s - Datadog Cluster Agent", version.AgentVersion)) + demultiplexer.AddAgentStartupTelemetry(version.AgentVersion + " - Datadog Cluster Agent") pkglog.Infof("Datadog Cluster Agent is now running.") @@ -338,7 +339,7 @@ func initializeBBSCache(ctx context.Context) error { } case <-timer.C: ticker.Stop() - return fmt.Errorf("BBS Cache failed to warm up. Misconfiguration error? Inspect logs") + return errors.New("BBS Cache failed to warm up. Misconfiguration error? 
Inspect logs") } } } diff --git a/cmd/cluster-agent/api/server_test.go b/cmd/cluster-agent/api/server_test.go index fe34d07d92126a..c1ebd319fc9019 100644 --- a/cmd/cluster-agent/api/server_test.go +++ b/cmd/cluster-agent/api/server_test.go @@ -66,7 +66,7 @@ func TestValidateTokenMiddleware(t *testing.T) { req, err := http.NewRequest("GET", tt.path, nil) require.NoError(t, err) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tt.authToken)) + req.Header.Add("Authorization", "Bearer "+tt.authToken) rr := httptest.NewRecorder() diff --git a/cmd/cluster-agent/api/v1/kubernetes_metadata_test.go b/cmd/cluster-agent/api/v1/kubernetes_metadata_test.go index 0ea9ad4e9806c0..8b49770bb5bda0 100644 --- a/cmd/cluster-agent/api/v1/kubernetes_metadata_test.go +++ b/cmd/cluster-agent/api/v1/kubernetes_metadata_test.go @@ -67,7 +67,7 @@ func TestGetNodeAnnotations(t *testing.T) { }{ { name: "no filters passed only host aliases annotations returned", - path: fmt.Sprintf("/annotations/node/%s", testNode), + path: "/annotations/node/" + testNode, muxVars: map[string]string{"nodeName": testNode}, body: map[string]string{"annotation1": "abc"}, // hardcoded above in workloadmeta mock status: http.StatusOK, @@ -168,7 +168,7 @@ func TestGetNodeUID(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - req, _ := http.NewRequest("GET", fmt.Sprintf("/uid/node/%s", tt.node), nil) + req, _ := http.NewRequest("GET", "/uid/node/"+tt.node, nil) req = mux.SetURLVars(req, map[string]string{"nodeName": tt.node}) respw := httptest.NewRecorder() diff --git a/cmd/cluster-agent/command/command.go b/cmd/cluster-agent/command/command.go index 8077a68c0f4621..d23e1c5fe8bdfe 100644 --- a/cmd/cluster-agent/command/command.go +++ b/cmd/cluster-agent/command/command.go @@ -9,7 +9,6 @@ package command import ( - "fmt" "os" "github.com/fatih/color" @@ -44,7 +43,7 @@ func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { // AgentCmd is the root command 
agentCmd := &cobra.Command{ - Use: fmt.Sprintf("%s [command]", os.Args[0]), + Use: os.Args[0] + " [command]", Short: "Datadog Cluster Agent at your service.", Long: ` Datadog Cluster Agent takes care of running checks that need run only once per cluster. diff --git a/cmd/cluster-agent/custommetrics/server.go b/cmd/cluster-agent/custommetrics/server.go index 95a6b6424a18fa..a8aece89ec32bd 100644 --- a/cmd/cluster-agent/custommetrics/server.go +++ b/cmd/cluster-agent/custommetrics/server.go @@ -10,6 +10,7 @@ package custommetrics import ( "context" + "errors" "fmt" "github.com/spf13/pflag" @@ -47,7 +48,7 @@ const ( func RunServer(ctx context.Context, apiCl *as.APIClient, datadogCl option.Option[datadogclient.Component]) error { defer clearServerResources() if apiCl == nil { - return fmt.Errorf("unable to run server with nil APIClient") + return errors.New("unable to run server with nil APIClient") } cmd = &DatadogMetricsAdapter{} @@ -99,7 +100,7 @@ func (a *DatadogMetricsAdapter) makeProviderOrDie(ctx context.Context, apiCl *as if dc, ok := datadogCl.Get(); ok { return externalmetrics.NewDatadogMetricProvider(ctx, apiCl, dc) } - return nil, fmt.Errorf("unable to create DatadogMetricProvider as DatadogClient failed with uninitialized datadog client") + return nil, errors.New("unable to create DatadogMetricProvider as DatadogClient failed with uninitialized datadog client") } datadogHPAConfigMap := custommetrics.GetConfigmapName() diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index d8957f16fe95ab..eb7e96523ffcff 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -291,7 +291,7 @@ func start(log log.Component, common.SetupInternalProfiling(settings, config, "") if !config.IsSet("api_key") { - return fmt.Errorf("no API key configured, exiting") + return errors.New("no API key configured, exiting") } // Expose the registered metrics via HTTP. 
@@ -358,7 +358,7 @@ func start(log log.Component, // * It is still able to serve metrics to the WPA controller and // * The metrics reported are reported as stale so that there is no "lie" about the accuracy of the reported metrics. // Serving stale data is better than serving no data at all. - demultiplexer.AddAgentStartupTelemetry(fmt.Sprintf("%s - Datadog Cluster Agent", version.AgentVersion)) + demultiplexer.AddAgentStartupTelemetry(version.AgentVersion + " - Datadog Cluster Agent") // Create event recorder eventBroadcaster := record.NewBroadcaster() @@ -394,7 +394,7 @@ func start(log log.Component, } if clusterName == "" { if config.GetBool("autoscaling.workload.enabled") || config.GetBool("autoscaling.cluster.enabled") { - return fmt.Errorf("Failed to start: autoscaling is enabled but no cluster name detected, exiting") + return errors.New("Failed to start: autoscaling is enabled but no cluster name detected, exiting") } pkglog.Warn("Failed to auto-detect a Kubernetes cluster name. 
We recommend you set it manually via the cluster_name config option") } @@ -486,7 +486,7 @@ func start(log log.Component, var pa workload.PodPatcher if config.GetBool("autoscaling.workload.enabled") { if rcClient == nil { - return fmt.Errorf("Remote config is disabled or failed to initialize, remote config is a required dependency for autoscaling") + return errors.New("Remote config is disabled or failed to initialize, remote config is a required dependency for autoscaling") } if !config.GetBool("admission_controller.enabled") { diff --git a/cmd/cws-instrumentation/command/command.go b/cmd/cws-instrumentation/command/command.go index 61aea67b20e50e..67510525c680a5 100644 --- a/cmd/cws-instrumentation/command/command.go +++ b/cmd/cws-instrumentation/command/command.go @@ -7,7 +7,6 @@ package command import ( - "fmt" "os" "github.com/spf13/cobra" @@ -20,7 +19,7 @@ type SubcommandFactory func() []*cobra.Command func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { // cwsInjectorCmd is the root command cwsInjectorCmd := &cobra.Command{ - Use: fmt.Sprintf("%s [command]", os.Args[0]), + Use: os.Args[0] + " [command]", Short: "Datadog Agent CWS Injector", Long: ` The Datadog Agent CWS Injector is used for multiple purposes: diff --git a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go index 2a2bc9cc5f578e..80ad2a6d0cdd46 100644 --- a/cmd/cws-instrumentation/subcommands/tracecmd/trace.go +++ b/cmd/cws-instrumentation/subcommands/tracecmd/trace.go @@ -14,6 +14,7 @@ import ( "math" "os" "os/exec" + "strconv" "strings" "sync" "time" @@ -151,40 +152,40 @@ func Command() []*cobra.Command { args := []string{ "trace", - fmt.Sprintf(`--%s`, probeAddrOpt), + "--" + probeAddrOpt, params.ProbeAddr, } if params.Verbose { - args = append(args, fmt.Sprintf(`--%s`, verboseOpt)) + args = append(args, "--"+verboseOpt) } if params.Debug { - args = append(args, fmt.Sprintf(`--%s`, debugOpt)) + args = 
append(args, "--"+debugOpt) } if params.UID != -1 { - args = append(args, fmt.Sprintf(`--%s`, uidOpt), fmt.Sprintf(`%d`, params.UID)) + args = append(args, "--"+uidOpt, strconv.Itoa(int(params.UID))) } if params.GID != -1 { - args = append(args, fmt.Sprintf(`--%s`, gidOpt), fmt.Sprintf(`%d`, params.GID)) + args = append(args, "--"+gidOpt, strconv.Itoa(int(params.GID))) } if params.Async { - args = append(args, fmt.Sprintf(`--%s`, asyncOpt)) + args = append(args, "--"+asyncOpt) } if params.StatsDisabled { - args = append(args, fmt.Sprintf(`--%s`, disableStatsOpt)) + args = append(args, "--"+disableStatsOpt) } if params.ProcScanDisabled { - args = append(args, fmt.Sprintf(`--%s`, disableProcScanOpt)) + args = append(args, "--"+disableProcScanOpt) } if params.ScanProcEvery != "" { - args = append(args, fmt.Sprintf(`--%s`, scanProcEveryOpt), params.ScanProcEvery) + args = append(args, "--"+scanProcEveryOpt, params.ScanProcEvery) } if params.SeccompDisabled { - args = append(args, fmt.Sprintf(`--%s`, disableSeccompOpt)) + args = append(args, "--"+disableSeccompOpt) } for _, pid := range set { - args = append(args, fmt.Sprintf(`--%s`, pidOpt), fmt.Sprintf(`%d`, pid)) + args = append(args, "--"+pidOpt, strconv.Itoa(pid)) } cmd := exec.Command(executable, args...) 
diff --git a/cmd/installer/command/command.go b/cmd/installer/command/command.go index d4a62b4b167c7a..d547dead463b28 100644 --- a/cmd/installer/command/command.go +++ b/cmd/installer/command/command.go @@ -7,7 +7,6 @@ package command import ( - "fmt" "os" "runtime" @@ -57,7 +56,7 @@ func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { // AgentCmd is the root command agentCmd := &cobra.Command{ - Use: fmt.Sprintf("%s [command]", os.Args[0]), + Use: os.Args[0] + " [command]", Short: "Datadog Installer at your service.", Long: ` Datadog Installer installs datadog-packages based on your commands.`, diff --git a/cmd/installer/user/user_darwin.go b/cmd/installer/user/user_darwin.go index c19de4c2ebeb77..66b6456ae55981 100644 --- a/cmd/installer/user/user_darwin.go +++ b/cmd/installer/user/user_darwin.go @@ -9,12 +9,12 @@ package user import ( - "fmt" + "errors" "syscall" ) // ErrRootRequired is the error returned when an operation requires root privileges. -var ErrRootRequired = fmt.Errorf("operation requires root privileges") +var ErrRootRequired = errors.New("operation requires root privileges") // IsRoot always returns true on darwin. func IsRoot() bool { diff --git a/cmd/installer/user/user_nix.go b/cmd/installer/user/user_nix.go index 33a49cc2ed23f5..353a88b9d52c48 100644 --- a/cmd/installer/user/user_nix.go +++ b/cmd/installer/user/user_nix.go @@ -10,6 +10,7 @@ package user import ( "context" + "errors" "fmt" "syscall" @@ -17,7 +18,7 @@ import ( ) // ErrRootRequired is the error returned when an operation requires root privileges. -var ErrRootRequired = fmt.Errorf("operation requires root privileges") +var ErrRootRequired = errors.New("operation requires root privileges") // IsRoot returns true if the process is running as root. 
func IsRoot() bool { diff --git a/cmd/installer/user/user_windows.go b/cmd/installer/user/user_windows.go index cd2b236be1e2b0..aa0b04ea0d95d0 100644 --- a/cmd/installer/user/user_windows.go +++ b/cmd/installer/user/user_windows.go @@ -9,13 +9,14 @@ package user import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/util/winutil" ) // ErrRootRequired is the error returned when an operation requires Administrator privileges. -var ErrRootRequired = fmt.Errorf("operation requires Administrator privileges") +var ErrRootRequired = errors.New("operation requires Administrator privileges") // IsRoot returns true if token has Administrators group enabled func IsRoot() bool { diff --git a/cmd/otel-agent/config/agent_config.go b/cmd/otel-agent/config/agent_config.go index d377cea7e46e57..efdd1d75c90653 100644 --- a/cmd/otel-agent/config/agent_config.go +++ b/cmd/otel-agent/config/agent_config.go @@ -65,7 +65,7 @@ var logLevelReverseMap = func(src map[string]logLevel) map[logLevel]string { }(logLevelMap) // ErrNoDDExporter indicates there is no Datadog exporter in the configs -var ErrNoDDExporter = fmt.Errorf("no datadog exporter found") +var ErrNoDDExporter = errors.New("no datadog exporter found") // NewConfigComponent creates a new config component from the given URIs func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (config.Component, error) { @@ -227,11 +227,11 @@ func getServiceConfig(cfg *confmap.Conf) (*service.Config, error) { var pipelineConfig *service.Config s := cfg.Get("service") if s == nil { - return nil, fmt.Errorf("service config not found") + return nil, errors.New("service config not found") } smap, ok := s.(map[string]any) if !ok { - return nil, fmt.Errorf("invalid service config") + return nil, errors.New("invalid service config") } err := confmap.NewFromStringMap(smap).Unmarshal(&pipelineConfig) if err != nil { @@ -285,7 +285,7 @@ func getDDExporterConfig(cfg *confmap.Conf) (*datadogconfig.Config, error) { // We only 
support one exporter for now // TODO: support multiple exporters if len(configs) > 1 { - return nil, fmt.Errorf("multiple datadog exporters found") + return nil, errors.New("multiple datadog exporters found") } datadogConfig := configs[0] diff --git a/cmd/process-agent/api/check.go b/cmd/process-agent/api/check.go index 4328e88708d29c..3036542dcb10a0 100644 --- a/cmd/process-agent/api/check.go +++ b/cmd/process-agent/api/check.go @@ -8,7 +8,6 @@ package api import ( "encoding/json" - "fmt" "html" "io" "net/http" @@ -24,7 +23,7 @@ func checkHandler(w http.ResponseWriter, req *http.Request) { checkOutput, ok := checks.GetCheckOutput(requestedCheck) if !ok { w.WriteHeader(http.StatusNotFound) - _, err := io.WriteString(w, fmt.Sprintf("%s check is not running or has not been scheduled yet\n", html.EscapeString(requestedCheck))) + _, err := io.WriteString(w, html.EscapeString(requestedCheck)+" check is not running or has not been scheduled yet\n") if err != nil { _ = log.Error() } diff --git a/cmd/process-agent/subcommands/config/config.go b/cmd/process-agent/subcommands/config/config.go index 48ef9bb74b01bb..53439092a2a0da 100644 --- a/cmd/process-agent/subcommands/config/config.go +++ b/cmd/process-agent/subcommands/config/config.go @@ -7,6 +7,7 @@ package config import ( + "errors" "fmt" "github.com/spf13/cobra" @@ -154,7 +155,7 @@ func setConfigValue(deps dependencies, args []string) error { } if len(args) != 2 { - return fmt.Errorf("exactly two parameters are required: the setting name and its value") + return errors.New("exactly two parameters are required: the setting name and its value") } hidden, err := c.Set(args[0], args[1]) @@ -178,7 +179,7 @@ func getConfigValue(deps dependencies, args []string) error { } if len(args) != 1 { - return fmt.Errorf("a single setting name must be specified") + return errors.New("a single setting name must be specified") } value, err := c.Get(args[0]) diff --git a/cmd/secrethelper/secret_helper.go 
b/cmd/secrethelper/secret_helper.go index a86c2018518ab2..f49db7d02b98e4 100644 --- a/cmd/secrethelper/secret_helper.go +++ b/cmd/secrethelper/secret_helper.go @@ -172,7 +172,7 @@ func readSecretsUsingPrefixes(secretsList []string, rootPath string, kubeSecretG case k8sSecretPrefix: res[secretID] = providers.ReadKubernetesSecret(kubeSecretGetter, id) default: - res[secretID] = secrets.SecretVal{Value: "", ErrorMsg: fmt.Sprintf("provider not supported: %s", prefix)} + res[secretID] = secrets.SecretVal{Value: "", ErrorMsg: "provider not supported: " + prefix} } } diff --git a/cmd/security-agent/command/command.go b/cmd/security-agent/command/command.go index a990c9c542fde6..49415c69e22d57 100644 --- a/cmd/security-agent/command/command.go +++ b/cmd/security-agent/command/command.go @@ -7,7 +7,7 @@ package command import ( - "fmt" + "errors" "path" "github.com/fatih/color" @@ -67,7 +67,7 @@ Datadog Security Agent takes care of running compliance and security checks.`, } if len(globalParams.ConfigFilePaths) == 1 && globalParams.ConfigFilePaths[0] == "" { - return fmt.Errorf("no Security Agent config files to load, exiting") + return errors.New("no Security Agent config files to load, exiting") } return nil }, diff --git a/cmd/security-agent/subcommands/config/config.go b/cmd/security-agent/subcommands/config/config.go index a30dcc39f28d59..0bbf4fb7e2c4d9 100644 --- a/cmd/security-agent/subcommands/config/config.go +++ b/cmd/security-agent/subcommands/config/config.go @@ -9,6 +9,7 @@ package config import ( + "errors" "fmt" "github.com/spf13/cobra" @@ -168,7 +169,7 @@ func showRuntimeConfiguration(_ log.Component, cfg config.Component, _ secrets.C func setConfigValue(_ log.Component, _ config.Component, _ secrets.Component, client ipc.HTTPClient, params *cliParams) error { if len(params.args) != 2 { - return fmt.Errorf("exactly two parameters are required: the setting name and its value") + return errors.New("exactly two parameters are required: the setting name and its 
value") } c, err := getSettingsClient(client) @@ -192,7 +193,7 @@ func setConfigValue(_ log.Component, _ config.Component, _ secrets.Component, cl func getConfigValue(_ log.Component, _ config.Component, _ secrets.Component, client ipc.HTTPClient, params *cliParams) error { if len(params.args) != 1 { - return fmt.Errorf("a single setting name must be specified") + return errors.New("a single setting name must be specified") } c, err := getSettingsClient(client) diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index 911b5c5484613a..dfdb15818d4c23 100644 --- a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -388,5 +388,5 @@ func setupInternalProfiling(config config.Component) error { } func secAgentKey(sub string) string { - return fmt.Sprintf("security_agent.%s", sub) + return "security_agent." + sub } diff --git a/cmd/serverless-init/cloudservice/appservice.go b/cmd/serverless-init/cloudservice/appservice.go index 0c39e5572a2113..2ec04fcdc8bb4f 100644 --- a/cmd/serverless-init/cloudservice/appservice.go +++ b/cmd/serverless-init/cloudservice/appservice.go @@ -7,7 +7,6 @@ package cloudservice import ( - "fmt" "maps" "os" @@ -76,12 +75,12 @@ func (a *AppService) Init(_ interface{}) error { // Shutdown emits the shutdown metric for AppService func (a *AppService) Shutdown(metricAgent serverlessMetrics.ServerlessMetricAgent, _ interface{}, _ error) { - metric.Add(fmt.Sprintf("%s.enhanced.shutdown", appServicePrefix), 1.0, a.GetSource(), metricAgent) + metric.Add(appServicePrefix+".enhanced.shutdown", 1.0, a.GetSource(), metricAgent) } // GetStartMetricName returns the metric name for container start (coldstart) events func (a *AppService) GetStartMetricName() string { - return fmt.Sprintf("%s.enhanced.cold_start", appServicePrefix) + return appServicePrefix + ".enhanced.cold_start" } // ShouldForceFlushAllOnForceFlushToSerializer is false usually. 
diff --git a/cmd/serverless-init/cloudservice/cloudrun.go b/cmd/serverless-init/cloudservice/cloudrun.go index 8f2e733fa598c7..749b43934078e1 100644 --- a/cmd/serverless-init/cloudservice/cloudrun.go +++ b/cmd/serverless-init/cloudservice/cloudrun.go @@ -155,12 +155,12 @@ func (c *CloudRun) Init(_ interface{}) error { // Shutdown emits the shutdown metric for CloudRun func (c *CloudRun) Shutdown(metricAgent serverlessMetrics.ServerlessMetricAgent, _ interface{}, _ error) { - metric.Add(fmt.Sprintf("%s.enhanced.shutdown", cloudRunPrefix), 1.0, c.GetSource(), metricAgent) + metric.Add(cloudRunPrefix+".enhanced.shutdown", 1.0, c.GetSource(), metricAgent) } // GetStartMetricName returns the metric name for container start (coldstart) events func (c *CloudRun) GetStartMetricName() string { - return fmt.Sprintf("%s.enhanced.cold_start", cloudRunPrefix) + return cloudRunPrefix + ".enhanced.cold_start" } // ShouldForceFlushAllOnForceFlushToSerializer is false usually. @@ -175,7 +175,7 @@ func isCloudRunService() bool { func isCloudRunFunction() bool { _, cloudRunFunctionMode := os.LookupEnv(functionTargetEnvVar) - log.Debug(fmt.Sprintf("cloud run namespace SET TO: %s", cloudRunFunction)) + log.Debug("cloud run namespace SET TO: " + cloudRunFunction) return cloudRunFunctionMode } diff --git a/cmd/serverless-init/cloudservice/cloudrun_jobs.go b/cmd/serverless-init/cloudservice/cloudrun_jobs.go index ea60741e64dd9e..342609dffd2865 100644 --- a/cmd/serverless-init/cloudservice/cloudrun_jobs.go +++ b/cmd/serverless-init/cloudservice/cloudrun_jobs.go @@ -8,6 +8,7 @@ package cloudservice import ( "fmt" "os" + "strconv" "time" "github.com/DataDog/datadog-agent/cmd/serverless-init/exitcode" @@ -116,11 +117,11 @@ func (c *CloudRunJobs) Init(traceAgent interface{}) error { // Shutdown submits the task duration and shutdown metrics for CloudRunJobs, // and completes and submits the job span. 
func (c *CloudRunJobs) Shutdown(metricAgent serverlessMetrics.ServerlessMetricAgent, traceAgent interface{}, runErr error) { - durationMetricName := fmt.Sprintf("%s.enhanced.task.duration", cloudRunJobsPrefix) + durationMetricName := cloudRunJobsPrefix + ".enhanced.task.duration" duration := float64(time.Since(c.startTime).Milliseconds()) metric.Add(durationMetricName, duration, c.GetSource(), metricAgent) - shutdownMetricName := fmt.Sprintf("%s.enhanced.task.ended", cloudRunJobsPrefix) + shutdownMetricName := cloudRunJobsPrefix + ".enhanced.task.ended" exitCode := exitcode.From(runErr) metric.Add(shutdownMetricName, 1.0, c.GetSource(), metricAgent, fmt.Sprintf("exit_code:%d", exitCode)) @@ -129,7 +130,7 @@ func (c *CloudRunJobs) Shutdown(metricAgent serverlessMetrics.ServerlessMetricAg // GetStartMetricName returns the metric name for container start events func (c *CloudRunJobs) GetStartMetricName() string { - return fmt.Sprintf("%s.enhanced.task.started", cloudRunJobsPrefix) + return cloudRunJobsPrefix + ".enhanced.task.started" } // ShouldForceFlushAllOnForceFlushToSerializer is true for cloud run jobs. 
@@ -196,7 +197,7 @@ func (c *CloudRunJobs) completeAndSubmitJobSpan(traceAgent interface{}, runErr e c.jobSpan.Error = 1 c.jobSpan.Meta["error.msg"] = runErr.Error() exitCode := exitcode.From(runErr) - c.jobSpan.Meta["exit_code"] = fmt.Sprintf("%d", exitCode) + c.jobSpan.Meta["exit_code"] = strconv.Itoa(exitCode) } serverlessInitTrace.SubmitSpan(c.jobSpan, CloudRunJobsOrigin, traceAgent) diff --git a/cmd/serverless-init/cloudservice/cloudrun_jobs_test.go b/cmd/serverless-init/cloudservice/cloudrun_jobs_test.go index 907b78fd933159..e3db2d41815c29 100644 --- a/cmd/serverless-init/cloudservice/cloudrun_jobs_test.go +++ b/cmd/serverless-init/cloudservice/cloudrun_jobs_test.go @@ -6,7 +6,7 @@ package cloudservice import ( - "fmt" + "errors" "os/exec" "testing" "time" @@ -99,7 +99,7 @@ func TestCloudRunJobsShutdownAddsExitCodeTag(t *testing.T) { agent := serverlessMetrics.ServerlessMetricAgent{Demux: demux} jobs := &CloudRunJobs{startTime: time.Now().Add(-time.Second)} - shutdownMetricName := fmt.Sprintf("%s.enhanced.task.ended", cloudRunJobsPrefix) + shutdownMetricName := cloudRunJobsPrefix + ".enhanced.task.ended" cmd := exec.Command("bash", "-c", "exit 1") err := cmd.Run() @@ -125,7 +125,7 @@ func TestCloudRunJobsShutdownExitCodeZeroOnSuccess(t *testing.T) { agent := serverlessMetrics.ServerlessMetricAgent{Demux: demux} jobs := &CloudRunJobs{startTime: time.Now().Add(-time.Second)} - shutdownMetricName := fmt.Sprintf("%s.enhanced.task.ended", cloudRunJobsPrefix) + shutdownMetricName := cloudRunJobsPrefix + ".enhanced.task.ended" jobs.Shutdown(agent, nil, nil) @@ -224,7 +224,7 @@ func TestCloudRunJobsCompleteAndSubmitJobSpanWithError(t *testing.T) { jobs.Init(mockAgent) // Simulate an error - testErr := fmt.Errorf("task failed") + testErr := errors.New("task failed") jobs.Shutdown(serverlessMetrics.ServerlessMetricAgent{}, mockAgent, testErr) // Verify the span was submitted diff --git a/cmd/serverless-init/cloudservice/containerapp.go 
b/cmd/serverless-init/cloudservice/containerapp.go index 0a593ffdcabd61..fe06ab11566244 100644 --- a/cmd/serverless-init/cloudservice/containerapp.go +++ b/cmd/serverless-init/cloudservice/containerapp.go @@ -151,12 +151,12 @@ func (c *ContainerApp) Init(_ interface{}) error { // Shutdown emits the shutdown metric for ContainerApp func (c *ContainerApp) Shutdown(metricAgent serverlessMetrics.ServerlessMetricAgent, _ interface{}, _ error) { - metric.Add(fmt.Sprintf("%s.enhanced.shutdown", containerAppPrefix), 1.0, c.GetSource(), metricAgent) + metric.Add(containerAppPrefix+".enhanced.shutdown", 1.0, c.GetSource(), metricAgent) } // GetStartMetricName returns the metric name for container start (coldstart) events func (c *ContainerApp) GetStartMetricName() string { - return fmt.Sprintf("%s.enhanced.cold_start", containerAppPrefix) + return containerAppPrefix + ".enhanced.cold_start" } // ShouldForceFlushAllOnForceFlushToSerializer is false usually. diff --git a/cmd/serverless-init/cloudservice/service.go b/cmd/serverless-init/cloudservice/service.go index 5aba09af0ca728..27646f938e01c9 100644 --- a/cmd/serverless-init/cloudservice/service.go +++ b/cmd/serverless-init/cloudservice/service.go @@ -6,8 +6,6 @@ package cloudservice import ( - "fmt" - "github.com/DataDog/datadog-agent/cmd/serverless-init/metric" "github.com/DataDog/datadog-agent/pkg/metrics" serverlessMetrics "github.com/DataDog/datadog-agent/pkg/serverless/metrics" @@ -82,12 +80,12 @@ func (l *LocalService) Init(_ interface{}) error { // Shutdown emits the shutdown metric for LocalService func (l *LocalService) Shutdown(agent serverlessMetrics.ServerlessMetricAgent, _ interface{}, _ error) { - metric.Add(fmt.Sprintf("%s.enhanced.shutdown", defaultPrefix), 1.0, l.GetSource(), agent) + metric.Add(defaultPrefix+".enhanced.shutdown", 1.0, l.GetSource(), agent) } // GetStartMetricName returns the metric name for container start (coldstart) events func (l *LocalService) GetStartMetricName() string { - return 
fmt.Sprintf("%s.enhanced.cold_start", defaultPrefix) + return defaultPrefix + ".enhanced.cold_start" } // ShouldForceFlushAllOnForceFlushToSerializer is false usually. diff --git a/cmd/system-probe/command/command.go b/cmd/system-probe/command/command.go index 95516b658385b4..12ca79c4b8d706 100644 --- a/cmd/system-probe/command/command.go +++ b/cmd/system-probe/command/command.go @@ -7,7 +7,6 @@ package command import ( - "fmt" "os" "slices" "strings" @@ -41,7 +40,7 @@ func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { // AgentCmd is the root command sysprobeCmd := &cobra.Command{ - Use: fmt.Sprintf("%s [command]", os.Args[0]), + Use: os.Args[0] + " [command]", Short: "Datadog Agent System Probe", Long: ` The Datadog Agent System Probe runs as superuser in order to instrument diff --git a/cmd/system-probe/modules/compliance_test.go b/cmd/system-probe/modules/compliance_test.go index 29624000ddafb8..244937640f27bd 100644 --- a/cmd/system-probe/modules/compliance_test.go +++ b/cmd/system-probe/modules/compliance_test.go @@ -78,7 +78,7 @@ func launchFakeProcess(ctx context.Context, t *testing.T, tmp, procname string) t.Fatal(err) } - cmd := exec.CommandContext(ctx, fakePgBinPath, fmt.Sprintf("--config-file=%s", fakePgConfPath)) + cmd := exec.CommandContext(ctx, fakePgBinPath, "--config-file="+fakePgConfPath) if err := cmd.Start(); err != nil { t.Fatalf("could not start fake process %q: %v", procname, err) } diff --git a/cmd/system-probe/subcommands/debug/command.go b/cmd/system-probe/subcommands/debug/command.go index 72dcab9bab9e85..bfaff557582298 100644 --- a/cmd/system-probe/subcommands/debug/command.go +++ b/cmd/system-probe/subcommands/debug/command.go @@ -68,7 +68,7 @@ func debugRuntime(sysprobeconfig sysprobeconfig.Component, cliParams *cliParams) var path string if len(cliParams.args) == 1 { - path = fmt.Sprintf("http://localhost/debug/%s", cliParams.args[0]) + path = "http://localhost/debug/" + cliParams.args[0] } else { path = 
fmt.Sprintf("http://localhost/%s/debug/%s", cliParams.args[0], cliParams.args[1]) } diff --git a/cmd/system-probe/subcommands/modrestart/command.go b/cmd/system-probe/subcommands/modrestart/command.go index a3fccf70317ed5..e62625c58e78eb 100644 --- a/cmd/system-probe/subcommands/modrestart/command.go +++ b/cmd/system-probe/subcommands/modrestart/command.go @@ -62,7 +62,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { func moduleRestart(sysprobeconfig sysprobeconfig.Component, cliParams *cliParams) error { cfg := sysprobeconfig.SysProbeObject() client := client.Get(cfg.SocketAddress) - url := fmt.Sprintf("http://localhost/module-restart/%s", cliParams.args[0]) + url := "http://localhost/module-restart/" + cliParams.args[0] resp, err := client.Post(url, "", nil) if err != nil { return err diff --git a/cmd/system-probe/subcommands/runtime/command.go b/cmd/system-probe/subcommands/runtime/command.go index f5f69d03502a03..3930d5d35367f4 100644 --- a/cmd/system-probe/subcommands/runtime/command.go +++ b/cmd/system-probe/subcommands/runtime/command.go @@ -564,7 +564,7 @@ func downloadPolicy(log log.Component, config config.Component, _ secrets.Compon } // Extract and merge rules from custom policies - var customRules string + var customRulesBuilder strings.Builder for _, customPolicy := range customPolicies { customPolicyLines := strings.Split(customPolicy, "\n") rulesIndex := -1 @@ -575,9 +575,11 @@ func downloadPolicy(log log.Component, config config.Component, _ secrets.Compon } } if rulesIndex != -1 && rulesIndex+1 < len(customPolicyLines) { - customRules += "\n" + strings.Join(customPolicyLines[rulesIndex+1:], "\n") + customRulesBuilder.WriteString("\n") + customRulesBuilder.WriteString(strings.Join(customPolicyLines[rulesIndex+1:], "\n")) } } + customRules := customRulesBuilder.String() // Output depending on user's specification var outputContent string diff --git a/cmd/system-probe/subcommands/usm/sysinfo_linux.go 
b/cmd/system-probe/subcommands/usm/sysinfo_linux.go index 4c86e98fe46e44..ad0f8d9dc7459c 100644 --- a/cmd/system-probe/subcommands/usm/sysinfo_linux.go +++ b/cmd/system-probe/subcommands/usm/sysinfo_linux.go @@ -12,6 +12,7 @@ import ( "os" "runtime" "sort" + "strings" "time" "github.com/spf13/cobra" @@ -185,12 +186,12 @@ func formatCmdline(args []string) string { if len(args) == 0 { return "" } - result := "" + var builder strings.Builder for i, arg := range args { if i > 0 { - result += " " + builder.WriteString(" ") } - result += arg + builder.WriteString(arg) } - return result + return builder.String() } diff --git a/cmd/systray/command/command.go b/cmd/systray/command/command.go index ec4c6207f522d4..cbb694c26c694a 100644 --- a/cmd/systray/command/command.go +++ b/cmd/systray/command/command.go @@ -8,6 +8,7 @@ package command import ( + "errors" "fmt" "os" "path/filepath" @@ -159,7 +160,7 @@ func ensureElevated(params systray.Params) error { // user is not an admin if params.LaunchElevatedFlag { - return fmt.Errorf("not running as elevated but elevated flag is set") + return errors.New("not running as elevated but elevated flag is set") } // attempt to launch as admin @@ -168,7 +169,7 @@ func ensureElevated(params systray.Params) error { return err } - return fmt.Errorf("exiting to allow elevated process to start") + return errors.New("exiting to allow elevated process to start") } // relaunchElevated launch another instance of the current process asking it to carry out a command as admin. 
diff --git a/cmd/trace-agent/subcommands/info/command.go b/cmd/trace-agent/subcommands/info/command.go index ef913650d04de9..a3c01952c8cce2 100644 --- a/cmd/trace-agent/subcommands/info/command.go +++ b/cmd/trace-agent/subcommands/info/command.go @@ -7,7 +7,7 @@ package info import ( - "fmt" + "errors" "os" "github.com/spf13/cobra" @@ -55,7 +55,7 @@ func runTraceAgentInfoFct(params *subcommands.GlobalParams, fct interface{}) err func agentInfo(config config.Component) error { tracecfg := config.Object() if tracecfg == nil { - return fmt.Errorf("Unable to successfully parse config") + return errors.New("Unable to successfully parse config") } if err := info.InitInfo(tracecfg); err != nil { return err diff --git a/cmd/trace-agent/test/testsuite/config_set_test.go b/cmd/trace-agent/test/testsuite/config_set_test.go index 1a9175490aca21..d8eee06a766fe1 100644 --- a/cmd/trace-agent/test/testsuite/config_set_test.go +++ b/cmd/trace-agent/test/testsuite/config_set_test.go @@ -6,7 +6,6 @@ package testsuite import ( - "fmt" "net/http" "testing" "time" @@ -36,7 +35,7 @@ func TestConfigSetHandlerUnauthenticated(t *testing.T) { assert.NotContains(t, logstr, "| DEBUG |") assert.Contains(t, logstr, "| INFO |") - resp, err := r.DoReq(fmt.Sprintf("config/set?log_level=%s", log.WarnStr), http.MethodPost, nil) + resp, err := r.DoReq("config/set?log_level="+log.WarnStr, http.MethodPost, nil) if err != nil { t.Fatal(err) } diff --git a/comp/agent/autoexit/autoexitimpl/manager.go b/comp/agent/autoexit/autoexitimpl/manager.go index 44089dd4538d0a..5f9ee8a8b7c36b 100644 --- a/comp/agent/autoexit/autoexitimpl/manager.go +++ b/comp/agent/autoexit/autoexitimpl/manager.go @@ -7,6 +7,7 @@ package autoexitimpl import ( "context" + "errors" "fmt" "os" "regexp" @@ -59,7 +60,7 @@ func configureAutoExit(ctx context.Context, cfg config.Component, log log.Compon func startAutoExit(ctx context.Context, sd exitDetector, log log.Component, tickerPeriod, validationPeriod time.Duration) error { if sd == 
nil { - return fmt.Errorf("a shutdown detector must be provided") + return errors.New("a shutdown detector must be provided") } selfProcess, err := os.FindProcess(os.Getpid()) diff --git a/comp/agent/expvarserver/expvarserverimpl/expvarserver.go b/comp/agent/expvarserver/expvarserverimpl/expvarserver.go index c607dfa6ce8284..07456a8c501d35 100644 --- a/comp/agent/expvarserver/expvarserverimpl/expvarserver.go +++ b/comp/agent/expvarserver/expvarserverimpl/expvarserver.go @@ -9,7 +9,6 @@ package expvarserverimpl import ( "context" "errors" - "fmt" "net/http" "go.uber.org/fx" @@ -40,7 +39,7 @@ func newExpvarServer(deps dependencies) expvarserver.Component { deps.Lc.Append(fx.Hook{ OnStart: func(context.Context) error { expvarServer = &http.Server{ - Addr: fmt.Sprintf("127.0.0.1:%s", expvarPort), + Addr: "127.0.0.1:" + expvarPort, Handler: http.DefaultServeMux, } go func() { diff --git a/comp/api/api/apiimpl/api_test.go b/comp/api/api/apiimpl/api_test.go index 0ce25893c9e8b5..7003d9176e8bca 100644 --- a/comp/api/api/apiimpl/api_test.go +++ b/comp/api/api/apiimpl/api_test.go @@ -234,7 +234,7 @@ func TestStartServerWithGrpcServer(t *testing.T) { addr := deps.API.CMDServerAddress().String() - url := fmt.Sprintf("https://%s", addr) + url := "https://" + addr req, err := http.NewRequest(http.MethodGet, url, nil) require.NoError(t, err) req.Header.Set("Content-Type", "application/grpc") @@ -277,7 +277,7 @@ func TestStartServerWithoutGrpcServer(t *testing.T) { addr := deps.API.CMDServerAddress().String() - url := fmt.Sprintf("https://%s", addr) + url := "https://" + addr // test the api routes does not routes grpc request to the grpc server req, err := http.NewRequest(http.MethodGet, url, nil) diff --git a/comp/api/api/apiimpl/internal/config/endpoint.go b/comp/api/api/apiimpl/internal/config/endpoint.go index aa19efa9d2ca88..c61d661a4b1b6a 100644 --- a/comp/api/api/apiimpl/internal/config/endpoint.go +++ b/comp/api/api/apiimpl/internal/config/endpoint.go @@ -7,6 +7,7 @@ 
package config import ( + "errors" "expvar" "fmt" "html" @@ -144,7 +145,7 @@ func encodeInterfaceSliceToStringMap(c model.Reader, key string) ([]map[string]s } values, ok := value.([]interface{}) if !ok { - return nil, fmt.Errorf("key does not host a slice of interfaces") + return nil, errors.New("key does not host a slice of interfaces") } return util.GetSliceOfStringMap(values) diff --git a/comp/api/api/apiimpl/server.go b/comp/api/api/apiimpl/server.go index b80b475f22a7f9..dc53cce5e025d9 100644 --- a/comp/api/api/apiimpl/server.go +++ b/comp/api/api/apiimpl/server.go @@ -8,6 +8,7 @@ package apiimpl import ( "crypto/tls" "crypto/x509" + "errors" "fmt" stdLog "log" "net" @@ -87,7 +88,7 @@ func (server *apiServer) stopServers() { func authTagGetter(serverTLSConfig *tls.Config) (func(r *http.Request) string, error) { // Read the IPC certificate from the server TLS config if serverTLSConfig == nil || len(serverTLSConfig.Certificates) == 0 || len(serverTLSConfig.Certificates[0].Certificate) == 0 { - return nil, fmt.Errorf("no certificates found in server TLS config") + return nil, errors.New("no certificates found in server TLS config") } cert, err := x509.ParseCertificate(serverTLSConfig.Certificates[0].Certificate[0]) diff --git a/comp/api/commonendpoints/impl/common_endpoints_test.go b/comp/api/commonendpoints/impl/common_endpoints_test.go index 78eb34e94a724a..f1059ad35af5bc 100644 --- a/comp/api/commonendpoints/impl/common_endpoints_test.go +++ b/comp/api/commonendpoints/impl/common_endpoints_test.go @@ -8,7 +8,7 @@ package impl import ( "context" "encoding/json" - "fmt" + "errors" "net/http" "net/http/httptest" "testing" @@ -52,7 +52,7 @@ func TestStopAgent(t *testing.T) { t.Log("Received stop command, shutting down...") stopCh <- nil case <-time.After(time.Second * 30): // Timeout after 5 seconds - stopCh <- fmt.Errorf("Timeout waiting for stop signal") + stopCh <- errors.New("Timeout waiting for stop signal") } }() diff --git 
a/comp/checks/windowseventlog/windowseventlogimpl/check/check.go b/comp/checks/windowseventlog/windowseventlogimpl/check/check.go index 3432385e2bb78c..ce5adebc582a40 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/check.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/check.go @@ -9,6 +9,7 @@ package evtlog import ( + "errors" "fmt" "regexp" "strings" @@ -210,7 +211,7 @@ func (c *Check) validateConfig() error { return fmt.Errorf("invalid instance config `event_priority`: %w", err) } if isaffirmative(c.config.instance.LegacyMode) && isaffirmative(c.config.instance.LegacyModeV2) { - return fmt.Errorf("legacy_mode and legacy_mode_v2 are both true. Each instance must set a single mode to true") + return errors.New("legacy_mode and legacy_mode_v2 are both true. Each instance must set a single mode to true") } if isaffirmative(c.config.instance.LegacyMode) { // wrap ErrSkipCheckInstance for graceful skipping @@ -232,13 +233,13 @@ func (c *Check) validateConfig() error { ddSecurityEventsIsSetAndValid := false if val, isSet := c.config.instance.DDSecurityEvents.Get(); isSet && len(val) > 0 { if !ddSecurityEventsFeatureEnabled { - return fmt.Errorf("instance config `dd_security_events` is set, but the feature is not yet available") + return errors.New("instance config `dd_security_events` is set, but the feature is not yet available") } if !strings.EqualFold(val, "high") && !strings.EqualFold(val, "low") { - return fmt.Errorf("instance config `dd_security_events`, if set, must be either 'high' or 'low'") + return errors.New("instance config `dd_security_events`, if set, must be either 'high' or 'low'") } if _, isSet := c.logsAgent.Get(); !isSet { - return fmt.Errorf("instance config `dd_security_events` is set, but logs-agent is not available. Set `logs_enabled: true` in datadog.yaml to enable sending Logs to Datadog") + return errors.New("instance config `dd_security_events` is set, but logs-agent is not available. 
Set `logs_enabled: true` in datadog.yaml to enable sending Logs to Datadog") } f, err := c.loadDDSecurityProfile(val) if err != nil { @@ -248,15 +249,15 @@ func (c *Check) validateConfig() error { ddSecurityEventsIsSetAndValid = true } if !channelPathIsSetAndNotEmpty && !ddSecurityEventsIsSetAndValid { - return fmt.Errorf("instance config `path` or `dd_security_events` must be provided") + return errors.New("instance config `path` or `dd_security_events` must be provided") } if channelPathIsSetAndNotEmpty && ddSecurityEventsIsSetAndValid { - return fmt.Errorf("instance config `path` and `dd_security_events` are mutually exclusive, only one must be set per instance") + return errors.New("instance config `path` and `dd_security_events` are mutually exclusive, only one must be set per instance") } if val, isSet := c.config.instance.Query.Get(); !isSet || len(val) == 0 { // Query should always be set by this point, but might be "" - return fmt.Errorf("instance config `query` if provided must not be empty") + return errors.New("instance config `query` if provided must not be empty") } startMode, isSet := c.config.instance.Start.Get() if !isSet || (startMode != "now" && startMode != "oldest") { diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/check_test.go b/comp/checks/windowseventlog/windowseventlogimpl/check/check_test.go index 15d824540b0717..5d4bba605b2be0 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/check_test.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/check_test.go @@ -142,7 +142,7 @@ func TestGetEventsTestSuite(t *testing.T) { testerNames := eventlog_test.GetEnabledAPITesters() for _, tiName := range testerNames { - t.Run(fmt.Sprintf("%sAPI", tiName), func(t *testing.T) { + t.Run(tiName+"API", func(t *testing.T) { if tiName == "Fake" { t.Skip("Fake API does not implement EvtRenderValues") } @@ -652,7 +652,7 @@ start: now s.channelPath)) if len(tc.confPriority) > 0 { - instanceConfig = 
append(instanceConfig, []byte(fmt.Sprintf("event_priority: %s", tc.confPriority))...) + instanceConfig = append(instanceConfig, []byte("event_priority: "+tc.confPriority)...) } check, err := s.newCheck(instanceConfig) diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go b/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go index 05e7ad00c593fb..4be062d8689375 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go @@ -8,6 +8,7 @@ package evtlog import ( + "errors" "fmt" "regexp" @@ -55,7 +56,7 @@ func evtRPCFlagsFromString(flags string) (uint, error) { func evtRPCFlagsFromOption(authType option.Option[string]) (uint, error) { val, isSet := authType.Get() if !isSet { - return 0, fmt.Errorf("option is not set") + return 0, errors.New("option is not set") } return evtRPCFlagsFromString(val) } @@ -68,7 +69,7 @@ func isaffirmative(o option.Option[bool]) bool { func getEventPriorityFromOption(o option.Option[string]) (agentEvent.Priority, error) { val, isSet := o.Get() if !isSet { - return "", fmt.Errorf("option is not set") + return "", errors.New("option is not set") } eventPriority, err := agentEvent.GetEventPriorityFromString(val) if err != nil { diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/config.go b/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/config.go index 82c1059e4117d2..fdbc2f292f65ec 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/config.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/config.go @@ -8,6 +8,7 @@ package eventdatafilter import ( + "errors" "fmt" "github.com/Masterminds/semver/v3" @@ -34,7 +35,7 @@ func unmarshalEventdataFilterSchema(config []byte) (*eventDataFilterSchema, erro return nil, fmt.Errorf("could not unmarshal schema_version: %w", err) } if 
version.SchemaVersion == "" { - return nil, fmt.Errorf("schema_version is required but is missing or empty") + return nil, errors.New("schema_version is required but is missing or empty") } supported, err := supportedVersion(version.SchemaVersion) if err != nil { diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/filter.go b/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/filter.go index 8f6127dc062028..1f9b66123714df 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/filter.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/eventdatafilter/filter.go @@ -9,6 +9,7 @@ package eventdatafilter import ( + "errors" "fmt" evtapi "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api" @@ -43,7 +44,7 @@ func NewFilterFromConfig(config []byte) (Filter, error) { func (f *eventIDFilter) Match(e EventData) (bool, error) { vals := e.SystemValues() if vals == nil { - return false, fmt.Errorf("event data is nil") + return false, errors.New("event data is nil") } eventID, err := vals.UInt(evtapi.EvtSystemEventID) if err != nil { diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/filters.go b/comp/checks/windowseventlog/windowseventlogimpl/check/filters.go index cf258d44b9c0ba..b431c7943669b1 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/filters.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/filters.go @@ -80,7 +80,7 @@ func genQueryPart[T string | int](vals []T, formatVal func(T) (string, error)) ( } func formatSourcePart(source string) (string, error) { - part := fmt.Sprintf("@Name=%s", xpathQuoteString(source)) + part := "@Name=" + xpathQuoteString(source) return part, nil } diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/subscription.go b/comp/checks/windowseventlog/windowseventlogimpl/check/subscription.go index a71cb538ffb8ce..ca6f3df8806d3e 100644 --- 
a/comp/checks/windowseventlog/windowseventlogimpl/check/subscription.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/subscription.go @@ -8,6 +8,7 @@ package evtlog import ( + "errors" "fmt" "io" "os" @@ -34,7 +35,7 @@ func (c *Check) getChannelPath() (string, error) { if val, isSet := c.config.instance.ChannelPath.Get(); isSet { return val, nil } - return "", fmt.Errorf("channel path is not set") + return "", errors.New("channel path is not set") } func (c *Check) initSubscription() error { @@ -45,15 +46,15 @@ func (c *Check) initSubscription() error { // so we might as well check they are set here, too. startMode, isSet := c.config.instance.Start.Get() if !isSet { - return fmt.Errorf("start mode is not set") + return errors.New("start mode is not set") } bookmarkFrequency, isSet := c.config.instance.BookmarkFrequency.Get() if !isSet { - return fmt.Errorf("bookmark frequency is not set") + return errors.New("bookmark frequency is not set") } payloadSize, isSet := c.config.instance.PayloadSize.Get() if !isSet { - return fmt.Errorf("payload size is not set") + return errors.New("payload size is not set") } channelPath, err := c.getChannelPath() if err != nil { @@ -61,7 +62,7 @@ func (c *Check) initSubscription() error { } query, isSet := c.config.instance.Query.Get() if !isSet { - return fmt.Errorf("query is not set") + return errors.New("query is not set") } // Create BookmarkManager for handling bookmark lifecycle @@ -153,7 +154,7 @@ func (c *Check) startSubscription() error { } } else { // validateConfig should prevent this from happening - return fmt.Errorf("neither channel path nor dd_security_events is set") + return errors.New("neither channel path nor dd_security_events is set") } return nil } @@ -170,12 +171,12 @@ func (c *Check) logsSubmitterPipeline(inCh <-chan *evtapi.EventRecord, wg *sync. 
logsAgent, isSet := c.logsAgent.Get() if !isSet { // sanity: validateConfig should prevent this from happening - return fmt.Errorf("no logs agent available") + return errors.New("no logs agent available") } if c.ddSecurityEventsFilter == nil { // sanity: validateConfig should prevent this from happening - return fmt.Errorf("no security profile loaded") + return errors.New("no security profile loaded") } eventWithDataCh := c.eventDataGetter(c.fetchEventsLoopStop, inCh, wg) @@ -328,9 +329,9 @@ func (c *Check) getProfilesDir() (string, error) { } else { root = c.agentConfig.GetString("confd_path") if root == "" { - return "", fmt.Errorf("confd_path is not set") + return "", errors.New("confd_path is not set") } - root = filepath.Join(root, fmt.Sprintf(`%s.d`, CheckName)) + root = filepath.Join(root, CheckName+".d") } return filepath.Join(root, "profiles"), nil } diff --git a/comp/checks/winregistry/impl/winregistryimpl.go b/comp/checks/winregistry/impl/winregistryimpl.go index 5409e145ba6cde..a222052f7ff64e 100644 --- a/comp/checks/winregistry/impl/winregistryimpl.go +++ b/comp/checks/winregistry/impl/winregistryimpl.go @@ -157,7 +157,7 @@ func (c *WindowsRegistryCheck) Configure(senderManager sender.SenderManager, int agentLog.Errorf("configuration error: %s (%v)", err, err.Value()) } } - return fmt.Errorf("configuration validation failed") + return errors.New("configuration validation failed") } var initCfg checkInitCfg diff --git a/comp/collector/collector/collectorimpl/collector.go b/comp/collector/collector/collectorimpl/collector.go index 4e330d30be0659..1c968647ff318e 100644 --- a/comp/collector/collector/collectorimpl/collector.go +++ b/comp/collector/collector/collectorimpl/collector.go @@ -8,6 +8,7 @@ package collectorimpl import ( "context" + "errors" "fmt" "sync" "time" @@ -208,7 +209,7 @@ func (c *collectorImpl) RunCheck(inner check.Check) (checkid.ID, error) { var emptyID checkid.ID if c.state.Load() != started { - return emptyID, fmt.Errorf("the 
collector is not running") + return emptyID, errors.New("the collector is not running") } if _, found := c.checks[ch.ID()]; found { @@ -247,7 +248,7 @@ func (c *collectorImpl) StopCheck(id checkid.ID) error { c.m.RLock() if !c.started() { c.m.RUnlock() - return fmt.Errorf("the collector is not running") + return errors.New("the collector is not running") } ch, found := c.checks[id] @@ -357,7 +358,7 @@ func (c *collectorImpl) GetChecks() []check.Check { // ReloadAllCheckInstances completely restarts a check with a new configuration and returns a list of killed check IDs func (c *collectorImpl) ReloadAllCheckInstances(name string, newInstances []check.Check) ([]checkid.ID, error) { if !c.started() { - return nil, fmt.Errorf("The collector is not running") + return nil, errors.New("The collector is not running") } // Stop all the old instances diff --git a/comp/collector/collector/collectorimpl/internal/middleware/check_wrapper.go b/comp/collector/collector/collectorimpl/internal/middleware/check_wrapper.go index ff3133fa0c1c3a..89a57063898d8e 100644 --- a/comp/collector/collector/collectorimpl/internal/middleware/check_wrapper.go +++ b/comp/collector/collector/collectorimpl/internal/middleware/check_wrapper.go @@ -7,7 +7,6 @@ package middleware import ( - "fmt" "sync" "time" @@ -54,7 +53,7 @@ func (c *CheckWrapper) Run() (err error) { // Start telemetry span if telemetry is enabled if telemetry, isSet := c.agentTelemetry.Get(); isSet { - span, _ := telemetry.StartStartupSpan(fmt.Sprintf("check.%s", c.inner.String())) + span, _ := telemetry.StartStartupSpan("check." 
+ c.inner.String()) defer span.Finish(err) } diff --git a/comp/core/agenttelemetry/impl/agenttelemetry_test.go b/comp/core/agenttelemetry/impl/agenttelemetry_test.go index b2a2c336e5a9dc..17f7c98176b93f 100644 --- a/comp/core/agenttelemetry/impl/agenttelemetry_test.go +++ b/comp/core/agenttelemetry/impl/agenttelemetry_test.go @@ -8,10 +8,12 @@ package agenttelemetryimpl import ( "context" "encoding/json" + "errors" "fmt" "io" "maps" "net/http" + "strings" "testing" dto "github.com/prometheus/client_model/go" @@ -102,17 +104,17 @@ func makeStableMetricMap(metrics []*dto.Metric) map[string]*dto.Metric { metricMap := make(map[string]*dto.Metric) for _, m := range metrics { - tagsKey := "" + var tagsKeyBuilder strings.Builder // sort by names and values before insertion origTags := m.GetLabel() if len(origTags) > 0 { for _, t := range cloneLabelsSorted(origTags) { - tagsKey += makeLabelPairKey(t) + tagsKeyBuilder.WriteString(makeLabelPairKey(t)) } } - metricMap[tagsKey] = m + metricMap[tagsKeyBuilder.String()] = m } return metricMap @@ -172,7 +174,7 @@ func getTestAtel(t *testing.T, atel := createAtel(cfg, log, tel, sndr, runner) if atel == nil { - err = fmt.Errorf("failed to create atel") + err = errors.New("failed to create atel") } assert.NoError(t, err) @@ -195,12 +197,12 @@ func (p *Payload) UnmarshalAgentMetrics(itfPayload map[string]interface{}) error var metricsItfPayload map[string]interface{} metricsItfPayload, ok = itfPayload["payload"].(map[string]interface{}) if !ok { - return fmt.Errorf("payload not found") + return errors.New("payload not found") } var metricsItf map[string]interface{} metricsItf, ok = metricsItfPayload["metrics"].(map[string]interface{}) if !ok { - return fmt.Errorf("metrics not found") + return errors.New("metrics not found") } var err error @@ -238,7 +240,7 @@ func (p *Payload) UnmarshalAgentMetrics(itfPayload map[string]interface{}) error func (p *Payload) UnmarshalMessageBatch(itfPayload map[string]interface{}) error { payloadsRaw, 
ok := itfPayload["payload"].([]interface{}) if !ok { - return fmt.Errorf("payload not found") + return errors.New("payload not found") } // ensure all payloads which should be agent-metrics @@ -246,20 +248,20 @@ func (p *Payload) UnmarshalMessageBatch(itfPayload map[string]interface{}) error for _, payloadRaw := range payloadsRaw { itfChildPayload, ok := payloadRaw.(map[string]interface{}) if !ok { - return fmt.Errorf("invalid payload item type") + return errors.New("invalid payload item type") } requestTypeRaw, ok := itfChildPayload["request_type"] if !ok { - return fmt.Errorf("request_type not found") + return errors.New("request_type not found") } requestType, ok := requestTypeRaw.(string) if !ok { - return fmt.Errorf("request_type type is invalid") + return errors.New("request_type type is invalid") } if requestType != "agent-metrics" { - return fmt.Errorf("request_type should be agent-metrics") + return errors.New("request_type should be agent-metrics") } var payload Payload @@ -284,11 +286,11 @@ func (p *Payload) UnmarshalJSON(b []byte) (err error) { requestTypeRaw, ok := itfPayload["request_type"] if !ok { - return fmt.Errorf("request_type not found") + return errors.New("request_type not found") } requestType, ok := requestTypeRaw.(string) if !ok { - return fmt.Errorf("request_type type is invalid") + return errors.New("request_type type is invalid") } if requestType == "agent-metrics" { @@ -299,7 +301,7 @@ func (p *Payload) UnmarshalJSON(b []byte) (err error) { return p.UnmarshalMessageBatch(itfPayload) } - return fmt.Errorf("request_type should be either agent-metrics or message-batch") + return errors.New("request_type should be either agent-metrics or message-batch") } func getPayload(a *atel) (*Payload, error) { diff --git a/comp/core/autodiscovery/autodiscoveryimpl/config_poller.go b/comp/core/autodiscovery/autodiscoveryimpl/config_poller.go index a17f2f817b80de..872079c28d059a 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/config_poller.go 
+++ b/comp/core/autodiscovery/autodiscoveryimpl/config_poller.go @@ -85,7 +85,7 @@ func (cp *configPoller) stream(ch chan struct{}, provider types.StreamingConfigP var ranOnce bool ctx, cancel := context.WithCancel(context.Background()) changesCh := provider.Stream(ctx) - healthHandle := health.RegisterLiveness(fmt.Sprintf("ad-config-provider-%s", cp.provider.String())) + healthHandle := health.RegisterLiveness("ad-config-provider-" + cp.provider.String()) cp.isRunning = true @@ -128,7 +128,7 @@ func (cp *configPoller) stream(ch chan struct{}, provider types.StreamingConfigP func (cp *configPoller) poll(provider types.CollectingConfigProvider, ac *AutoConfig) { ctx, cancel := context.WithCancel(context.Background()) ticker := time.NewTicker(cp.pollInterval) - healthHandle := health.RegisterLiveness(fmt.Sprintf("ad-config-provider-%s", cp.provider.String())) + healthHandle := health.RegisterLiveness("ad-config-provider-" + cp.provider.String()) cp.isRunning = true diff --git a/comp/core/autodiscovery/common/utils/annotations.go b/comp/core/autodiscovery/common/utils/annotations.go index 6bd0fcb83f1226..ed7f516d73b78b 100644 --- a/comp/core/autodiscovery/common/utils/annotations.go +++ b/comp/core/autodiscovery/common/utils/annotations.go @@ -115,7 +115,7 @@ func extractLogsTemplatesFromMap(configs []integration.Config, key string, input // ParseCheckNames returns a slice of check names parsed from a JSON array func ParseCheckNames(names string) (res []string, err error) { if names == "" { - return nil, fmt.Errorf("check_names is empty") + return nil, errors.New("check_names is empty") } if err = json.Unmarshal([]byte(names), &res); err != nil { @@ -129,7 +129,7 @@ func ParseCheckNames(names string) (res []string, err error) { // contained in the `value` parameter func ParseJSONValue(value string) ([][]integration.Data, error) { if value == "" { - return nil, fmt.Errorf("value is empty") + return nil, errors.New("value is empty") } var rawRes []interface{} diff --git 
a/comp/core/autodiscovery/common/utils/annotations_test.go b/comp/core/autodiscovery/common/utils/annotations_test.go index 2e2bb20f774aed..d916427a4d066f 100644 --- a/comp/core/autodiscovery/common/utils/annotations_test.go +++ b/comp/core/autodiscovery/common/utils/annotations_test.go @@ -258,25 +258,25 @@ func TestParseJSONValue(t *testing.T) { { name: "empty value", inputValue: "", - expectedErr: fmt.Errorf("value is empty"), + expectedErr: errors.New("value is empty"), expectedReturnValue: nil, }, { name: "value is not a list", inputValue: "{}", - expectedErr: fmt.Errorf("failed to unmarshal JSON: json: cannot unmarshal object into Go value of type []interface {}"), + expectedErr: errors.New("failed to unmarshal JSON: json: cannot unmarshal object into Go value of type []interface {}"), expectedReturnValue: nil, }, { name: "invalid json", inputValue: "[{]", - expectedErr: fmt.Errorf("failed to unmarshal JSON: invalid character ']' looking for beginning of object key string"), + expectedErr: errors.New("failed to unmarshal JSON: invalid character ']' looking for beginning of object key string"), expectedReturnValue: nil, }, { name: "bad type", inputValue: "[1, {\"test\": 1}, \"test\"]", - expectedErr: fmt.Errorf("failed to decode JSON Object '1' to integration.Data struct: found non JSON object type, value is: '1'"), + expectedErr: errors.New("failed to decode JSON Object '1' to integration.Data struct: found non JSON object type, value is: '1'"), expectedReturnValue: nil, }, { diff --git a/comp/core/autodiscovery/configresolver/configresolver.go b/comp/core/autodiscovery/configresolver/configresolver.go index 0046d864748b2f..ddce734757d084 100644 --- a/comp/core/autodiscovery/configresolver/configresolver.go +++ b/comp/core/autodiscovery/configresolver/configresolver.go @@ -627,7 +627,7 @@ func getEnvvar(envVar string, svc listeners.Service) (string, error) { if svc != nil { return "", fmt.Errorf("envvar name is missing, skipping service %s", 
svc.GetServiceID()) } - return "", fmt.Errorf("envvar name is missing") + return "", errors.New("envvar name is missing") } if !allowEnvVar(envVar) { diff --git a/comp/core/autodiscovery/listeners/kube_endpoints.go b/comp/core/autodiscovery/listeners/kube_endpoints.go index d920cf84d68de3..e916691cb8d434 100644 --- a/comp/core/autodiscovery/listeners/kube_endpoints.go +++ b/comp/core/autodiscovery/listeners/kube_endpoints.go @@ -353,9 +353,9 @@ func processEndpoints(kep *v1.Endpoints, tags []string, filterStore workloadfilt hosts: map[string]string{"endpoint": host.IP}, ports: ports, tags: []string{ - fmt.Sprintf("kube_service:%s", kep.Name), - fmt.Sprintf("kube_namespace:%s", kep.Namespace), - fmt.Sprintf("kube_endpoint_ip:%s", host.IP), + "kube_service:" + kep.Name, + "kube_namespace:" + kep.Namespace, + "kube_endpoint_ip:" + host.IP, }, metricsExcluded: metricsExcluded, globalExcluded: globalExcluded, diff --git a/comp/core/autodiscovery/listeners/kube_services.go b/comp/core/autodiscovery/listeners/kube_services.go index 64ac392afbbfcd..6bb3eed6cd01e0 100644 --- a/comp/core/autodiscovery/listeners/kube_services.go +++ b/comp/core/autodiscovery/listeners/kube_services.go @@ -260,8 +260,8 @@ func processService(ksvc *v1.Service, filterStore workloadfilter.Component) *Kub // Service tags svc.tags = []string{ - fmt.Sprintf("kube_service:%s", ksvc.Name), - fmt.Sprintf("kube_namespace:%s", ksvc.Namespace), + "kube_service:" + ksvc.Name, + "kube_namespace:" + ksvc.Namespace, } // Standard tags from the service's labels diff --git a/comp/core/autodiscovery/listeners/snmp.go b/comp/core/autodiscovery/listeners/snmp.go index 62564890d398b8..a77de8afcd77d0 100644 --- a/comp/core/autodiscovery/listeners/snmp.go +++ b/comp/core/autodiscovery/listeners/snmp.go @@ -643,11 +643,11 @@ func (s *SNMPService) GetExtraConfig(key string) (string, error) { case "version": return s.config.Version, nil case "timeout": - return fmt.Sprintf("%d", s.config.Timeout), nil + return 
strconv.Itoa(s.config.Timeout), nil case "retries": - return fmt.Sprintf("%d", s.config.Retries), nil + return strconv.Itoa(s.config.Retries), nil case "oid_batch_size": - return fmt.Sprintf("%d", s.config.OidBatchSize), nil + return strconv.Itoa(s.config.OidBatchSize), nil case "community": return s.config.Community, nil case "user": @@ -681,7 +681,7 @@ func (s *SNMPService) GetExtraConfig(key string) (string, error) { case "tags": return convertToCommaSepTags(s.config.Tags), nil case "min_collection_interval": - return fmt.Sprintf("%d", s.config.MinCollectionInterval), nil + return strconv.FormatUint(uint64(s.config.MinCollectionInterval), 10), nil case "interface_configs": ifConfigs := s.config.InterfaceConfigs[s.deviceIP] if len(ifConfigs) == 0 { diff --git a/comp/core/autodiscovery/providers/consul.go b/comp/core/autodiscovery/providers/consul.go index 85abf4642fb0dc..8532fc3b2ee8de 100644 --- a/comp/core/autodiscovery/providers/consul.go +++ b/comp/core/autodiscovery/providers/consul.go @@ -9,6 +9,7 @@ package providers import ( "context" + "errors" "fmt" "math" "net/url" @@ -271,7 +272,7 @@ func (p *ConsulConfigProvider) getCheckNames(ctx context.Context, key string) ([ names := string(raw) if names == "" { - err = fmt.Errorf("check_names is empty") + err = errors.New("check_names is empty") return nil, err } diff --git a/comp/core/autodiscovery/providers/datastreams/kafka_actions.go b/comp/core/autodiscovery/providers/datastreams/kafka_actions.go index 7d460969e66a1b..089507ad15cf4f 100644 --- a/comp/core/autodiscovery/providers/datastreams/kafka_actions.go +++ b/comp/core/autodiscovery/providers/datastreams/kafka_actions.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "sync" "time" @@ -255,7 +256,7 @@ func extractKafkaAuthFromInstance(cfgs []integration.Config, bootstrapServers st } if bootstrapServers == "" { - return out, nil, fmt.Errorf("kafka_consumer integration not found on this node") + return out, nil, 
errors.New("kafka_consumer integration not found on this node") } return out, nil, fmt.Errorf("kafka_consumer integration with bootstrap_servers=%s not found", bootstrapServers) } diff --git a/comp/core/autodiscovery/providers/endpointschecks.go b/comp/core/autodiscovery/providers/endpointschecks.go index ec7ba0d1133f8d..0d3f0cc7920f4a 100644 --- a/comp/core/autodiscovery/providers/endpointschecks.go +++ b/comp/core/autodiscovery/providers/endpointschecks.go @@ -9,7 +9,7 @@ package providers import ( "context" - "fmt" + stderrors "errors" "time" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" @@ -112,7 +112,7 @@ func getNodename(ctx context.Context) (string, error) { if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID == "" { - return "", fmt.Errorf("configuration variable cloud_foundry is set to true, but bosh_id is empty, can't retrieve node name") + return "", stderrors.New("configuration variable cloud_foundry is set to true, but bosh_id is empty, can't retrieve node name") } return boshID, nil } diff --git a/comp/core/autodiscovery/providers/process_log.go b/comp/core/autodiscovery/providers/process_log.go index 3e7bd1b203d4a6..830911f70c7011 100644 --- a/comp/core/autodiscovery/providers/process_log.go +++ b/comp/core/autodiscovery/providers/process_log.go @@ -235,7 +235,7 @@ func checkFileReadable(logPath string) error { if !utf8.Valid(buf) { log.Infof("Discovered log file %s is not a text file", logPath) - return fmt.Errorf("file is not a text file") + return errors.New("file is not a text file") } return nil @@ -500,7 +500,7 @@ func getServiceID(logFile string) string { func (p *processLogConfigProvider) getProcessTags(pid int32) ([]string, error) { if p.tagger == nil { - return nil, fmt.Errorf("tagger not available") + return nil, errors.New("tagger not available") } entityID := taggertypes.NewEntityID(taggertypes.Process, strconv.Itoa(int(pid))) return 
p.tagger.Tag(entityID, taggertypes.HighCardinality) diff --git a/comp/core/autodiscovery/providers/prometheus_pods_test.go b/comp/core/autodiscovery/providers/prometheus_pods_test.go index 0395d8505d1712..8276f785c53dba 100644 --- a/comp/core/autodiscovery/providers/prometheus_pods_test.go +++ b/comp/core/autodiscovery/providers/prometheus_pods_test.go @@ -9,7 +9,6 @@ package providers import ( "context" - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -104,8 +103,8 @@ func TestStream(t *testing.T) { configToSchedule := changes.Schedule[0] assert.Equal(t, "openmetrics", configToSchedule.Name) assert.Equal(t, names.PrometheusPods, configToSchedule.Provider) - assert.Equal(t, fmt.Sprintf("prometheus_pods:containerd://%s", testContainerID), configToSchedule.Source) - assert.Equal(t, []string{fmt.Sprintf("containerd://%s", testContainerID)}, configToSchedule.ADIdentifiers) + assert.Equal(t, "prometheus_pods:containerd://"+testContainerID, configToSchedule.Source) + assert.Equal(t, []string{"containerd://" + testContainerID}, configToSchedule.ADIdentifiers) // Remove the pod wmeta.Notify([]workloadmeta.CollectorEvent{ @@ -129,8 +128,8 @@ func TestStream(t *testing.T) { configToUnschedule := changes.Unschedule[0] assert.Equal(t, "openmetrics", configToUnschedule.Name) assert.Equal(t, names.PrometheusPods, configToUnschedule.Provider) - assert.Equal(t, fmt.Sprintf("prometheus_pods:containerd://%s", testContainerID), configToUnschedule.Source) - assert.Equal(t, []string{fmt.Sprintf("containerd://%s", testContainerID)}, configToUnschedule.ADIdentifiers) + assert.Equal(t, "prometheus_pods:containerd://"+testContainerID, configToUnschedule.Source) + assert.Equal(t, []string{"containerd://" + testContainerID}, configToUnschedule.ADIdentifiers) } func TestStream_NoAnnotations(t *testing.T) { diff --git a/comp/core/autodiscovery/providers/zookeeper_test.go b/comp/core/autodiscovery/providers/zookeeper_test.go index 69a8f07ae0d909..23515488b10178 100644 --- 
a/comp/core/autodiscovery/providers/zookeeper_test.go +++ b/comp/core/autodiscovery/providers/zookeeper_test.go @@ -9,7 +9,7 @@ package providers import ( "context" - "fmt" + "errors" "testing" "github.com/samuel/go-zookeeper/zk" @@ -59,14 +59,14 @@ func (m *zkTest) Children(key string) ([]string, *zk.Stat, error) { func TestZKGetIdentifiers(t *testing.T) { backend := &zkTest{} - backend.On("Children", "/test/").Return(nil, nil, fmt.Errorf("some error")).Times(1) + backend.On("Children", "/test/").Return(nil, nil, errors.New("some error")).Times(1) backend.On("Children", "/datadog/tpl").Return([]string{"nginx", "redis", "incomplete", "error"}, nil, nil).Times(1) expectedKeys := []string{checkNamePath, initConfigPath, instancePath} backend.On("Children", "/datadog/tpl/nginx").Return(expectedKeys, nil, nil).Times(1) backend.On("Children", "/datadog/tpl/redis").Return(append(expectedKeys, "an extra one"), nil, nil).Times(1) backend.On("Children", "/datadog/tpl/incomplete").Return([]string{checkNamePath, "other one"}, nil, nil).Times(1) - backend.On("Children", "/datadog/tpl/error").Return(nil, nil, fmt.Errorf("some error")).Times(1) + backend.On("Children", "/datadog/tpl/error").Return(nil, nil, errors.New("some error")).Times(1) zk := ZookeeperConfigProvider{client: backend} @@ -85,19 +85,19 @@ func TestZKGetIdentifiers(t *testing.T) { func TestZKGetTemplates(t *testing.T) { backend := &zkTest{} - backend.On("Get", "/error1/check_names").Return(nil, nil, fmt.Errorf("some error")).Times(1) + backend.On("Get", "/error1/check_names").Return(nil, nil, errors.New("some error")).Times(1) zk := ZookeeperConfigProvider{client: backend} res := zk.getTemplates("/error1/") assert.Nil(t, res) backend.On("Get", "/error2/check_names").Return([]byte("[\"first_name\"]"), nil, nil).Times(1) - backend.On("Get", "/error2/init_configs").Return(nil, nil, fmt.Errorf("some error")).Times(1) + backend.On("Get", "/error2/init_configs").Return(nil, nil, errors.New("some error")).Times(1) res 
= zk.getTemplates("/error2/") assert.Nil(t, res) backend.On("Get", "/error3/check_names").Return([]byte("[\"first_name\"]"), nil, nil).Times(1) backend.On("Get", "/error3/init_configs").Return([]byte("[{}]"), nil, nil).Times(1) - backend.On("Get", "/error3/instances").Return(nil, nil, fmt.Errorf("some error")).Times(1) + backend.On("Get", "/error3/instances").Return(nil, nil, errors.New("some error")).Times(1) res = zk.getTemplates("/error3/") assert.Nil(t, res) diff --git a/comp/core/diagnose/local/local.go b/comp/core/diagnose/local/local.go index 068d37d059fbe4..b07bc2b5b48e6f 100644 --- a/comp/core/diagnose/local/local.go +++ b/comp/core/diagnose/local/local.go @@ -9,7 +9,7 @@ package local import ( "context" - "fmt" + "errors" "time" "github.com/DataDog/datadog-agent/cmd/agent/common" @@ -106,7 +106,7 @@ func getLocalIntegrationConfigs( config config.Component) ([]integration.Config, error) { wmetaInstance, ok := wmeta.Get() if !ok { - return nil, fmt.Errorf("Workload Meta is not available") + return nil, errors.New("Workload Meta is not available") } common.LoadComponents(secretResolver, wmetaInstance, tagger, filterStore, ac, config.GetString("confd_path")) ac.LoadAndRun(context.Background()) diff --git a/comp/core/flare/flare.go b/comp/core/flare/flare.go index 5b8a1186d45a3c..11d6396858ecc9 100644 --- a/comp/core/flare/flare.go +++ b/comp/core/flare/flare.go @@ -7,6 +7,7 @@ package flare import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -99,11 +100,11 @@ func (f *flare) onAgentTaskEvent(taskType rcclienttypes.TaskType, task rcclientt } caseID, found := task.Config.TaskArgs["case_id"] if !found { - return true, fmt.Errorf("Case ID was not provided in the flare agent task") + return true, errors.New("Case ID was not provided in the flare agent task") } userHandle, found := task.Config.TaskArgs["user_handle"] if !found { - return true, fmt.Errorf("User handle was not provided in the flare agent task") + return true, errors.New("User handle was not 
provided in the flare agent task") } flareArgs := types.FlareArgs{} diff --git a/comp/core/flare/helpers/builder.go b/comp/core/flare/helpers/builder.go index 552b762c83c6ff..002a738a9897cb 100644 --- a/comp/core/flare/helpers/builder.go +++ b/comp/core/flare/helpers/builder.go @@ -150,7 +150,7 @@ func getArchiveName() string { logLevel, err := log.GetLogLevel() logLevelString := "" if err == nil { - logLevelString = fmt.Sprintf("-%s", logLevel.String()) + logLevelString = "-" + logLevel.String() } return fmt.Sprintf("datadog-agent-%s%s.zip", timeString, logLevelString) diff --git a/comp/core/flare/helpers/builder_test.go b/comp/core/flare/helpers/builder_test.go index ccc544eb6ecb9c..93cdcc5bc1d9e4 100644 --- a/comp/core/flare/helpers/builder_test.go +++ b/comp/core/flare/helpers/builder_test.go @@ -7,7 +7,7 @@ package helpers import ( "context" - "fmt" + "errors" "os" "path/filepath" "testing" @@ -118,7 +118,7 @@ func TestAddFileFromFunc(t *testing.T) { assertFileContent(t, fb, "", "test/AddFileFromFunc_nil") err := fb.AddFileFromFunc(FromSlash("test/AddFileFromFunc_error"), func() ([]byte, error) { - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") }) assert.Error(t, err) assert.Equal(t, FromSlash("error collecting data for 'test/AddFileFromFunc_error': some error"), err.Error()) diff --git a/comp/core/flare/helpers/send_flare_test.go b/comp/core/flare/helpers/send_flare_test.go index f57256eeb5b6b1..f01a08fd0be631 100644 --- a/comp/core/flare/helpers/send_flare_test.go +++ b/comp/core/flare/helpers/send_flare_test.go @@ -11,6 +11,7 @@ import ( "io" "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -171,10 +172,12 @@ func TestAnalyzeResponse(t *testing.T) { }) t.Run("unparseable-from-server-huge", func(t *testing.T) { - resp := "uhoh" + var respBuilder strings.Builder + respBuilder.WriteString("uhoh") for i := 0; i < 100; i++ { - resp += "\npad this out to be pretty long" + respBuilder.WriteString("\npad this out to be 
pretty long") } + resp := respBuilder.String() r := &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{"application/json"}}, diff --git a/comp/core/gui/guiimpl/agent_test.go b/comp/core/gui/guiimpl/agent_test.go index 19909838bdc71a..44ce831b928753 100644 --- a/comp/core/gui/guiimpl/agent_test.go +++ b/comp/core/gui/guiimpl/agent_test.go @@ -6,7 +6,6 @@ package guiimpl import ( - "fmt" "io" "net/http" "net/http/httptest" @@ -101,7 +100,7 @@ func Test_getConfigSetting(t *testing.T) { c.SetWithoutSource(tt.configSetting, tt.configValue) } - path := fmt.Sprintf("/getConfig/%s", tt.configSetting) + path := "/getConfig/" + tt.configSetting req, err := http.NewRequest("GET", path, nil) require.NoError(t, err) diff --git a/comp/core/gui/guiimpl/auth.go b/comp/core/gui/guiimpl/auth.go index de6a3f04dfe67e..0e13c2265a3c71 100644 --- a/comp/core/gui/guiimpl/auth.go +++ b/comp/core/gui/guiimpl/auth.go @@ -10,6 +10,7 @@ import ( "crypto/sha256" "encoding/base64" "encoding/binary" + "errors" "fmt" "strings" "time" @@ -39,7 +40,7 @@ func (a *authenticator) ValidateToken(token string) error { // Split the token into the payload and HMAC sum parts := strings.Split(token, ".") if len(parts) != 3 { - return fmt.Errorf("invalid token format") + return errors.New("invalid token format") } // Check token version @@ -55,7 +56,7 @@ func (a *authenticator) ValidateToken(token string) error { // Ensure the payload contains enough bytes for issued and expiration times if len(payloadBytes) < 16 { - return fmt.Errorf("invalid payload") + return errors.New("invalid payload") } // Extract the issued and expiration times from the payload @@ -77,22 +78,22 @@ func (a *authenticator) ValidateToken(token string) error { // Check if the current time is before the issued time if now.Before(time.Unix(issuedTime, 0)) { - return fmt.Errorf("token is invalid") + return errors.New("token is invalid") } // special case: ignore expirationTime if duration is equal to 0 // Check if 
the current time is after the expiration time if expirationTime != issuedTime && now.After(time.Unix(expirationTime, 0)) { - return fmt.Errorf("token is expired") + return errors.New("token is expired") } if a.duration != 0 && now.After(time.Unix(issuedTime, 0).Add(a.duration)) { - return fmt.Errorf("token is expired") + return errors.New("token is expired") } // Check if the HMAC sum matches the expected HMAC sum if !hmac.Equal(hmacSum, expectedHmacSum) { - return fmt.Errorf("invalid token signature") + return errors.New("invalid token signature") } return nil diff --git a/comp/core/gui/guiimpl/platform_darwin.go b/comp/core/gui/guiimpl/platform_darwin.go index 06b81b921650b9..ef6f88be834e0f 100644 --- a/comp/core/gui/guiimpl/platform_darwin.go +++ b/comp/core/gui/guiimpl/platform_darwin.go @@ -6,7 +6,7 @@ package guiimpl import ( - "fmt" + "errors" template "github.com/DataDog/datadog-agent/pkg/template/html" ) @@ -30,5 +30,5 @@ func restartEnabled() bool { } func restart() error { - return fmt.Errorf("restarting the agent is not implemented on non-windows platforms") + return errors.New("restarting the agent is not implemented on non-windows platforms") } diff --git a/comp/core/gui/guiimpl/platform_nix.go b/comp/core/gui/guiimpl/platform_nix.go index de610d11d9189e..1da224e5de7bc2 100644 --- a/comp/core/gui/guiimpl/platform_nix.go +++ b/comp/core/gui/guiimpl/platform_nix.go @@ -8,7 +8,7 @@ package guiimpl import ( - "fmt" + "errors" template "github.com/DataDog/datadog-agent/pkg/template/html" ) @@ -27,5 +27,5 @@ func restartEnabled() bool { } func restart() error { - return fmt.Errorf("restarting the agent is not implemented on non-windows platforms") + return errors.New("restarting the agent is not implemented on non-windows platforms") } diff --git a/comp/core/healthprobe/impl/healthprobe_test.go b/comp/core/healthprobe/impl/healthprobe_test.go index 7774e420437654..d3262a28a28583 100644 --- a/comp/core/healthprobe/impl/healthprobe_test.go +++ 
b/comp/core/healthprobe/impl/healthprobe_test.go @@ -8,7 +8,7 @@ package healthprobeimpl import ( "context" - "fmt" + "errors" "net/http" "net/http/httptest" "testing" @@ -135,7 +135,7 @@ func TestHealthHandlerFails(t *testing.T) { responseRecorder := httptest.NewRecorder() healthHandler(false, logComponent, func() (health.Status, error) { - return health.Status{}, fmt.Errorf("fail to extract status") + return health.Status{}, errors.New("fail to extract status") }, responseRecorder, request) assert.Equal(t, http.StatusInternalServerError, responseRecorder.Code) diff --git a/comp/core/ipc/httphelpers/middleware.go b/comp/core/ipc/httphelpers/middleware.go index a1a967a66fdeac..704e7f6e1fa589 100644 --- a/comp/core/ipc/httphelpers/middleware.go +++ b/comp/core/ipc/httphelpers/middleware.go @@ -7,6 +7,7 @@ package httphelpers import ( "crypto/subtle" + "errors" "fmt" "net/http" "strings" @@ -20,7 +21,7 @@ func NewHTTPMiddleware(logger func(format string, params ...interface{}), authto auth := r.Header.Get("Authorization") if auth == "" { w.Header().Set("WWW-Authenticate", `Bearer realm="Datadog Agent"`) - err = fmt.Errorf("no session token provided") + err = errors.New("no session token provided") http.Error(w, err.Error(), 401) logger("invalid auth token for %s request to %s: %s", r.Method, r.RequestURI, err) return @@ -38,7 +39,7 @@ func NewHTTPMiddleware(logger func(format string, params ...interface{}), authto // The following comparison must be evaluated in constant time if len(tok) < 2 || !constantCompareStrings(tok[1], authtoken) { - err = fmt.Errorf("invalid session token") + err = errors.New("invalid session token") http.Error(w, err.Error(), 403) logger("invalid auth token for %s request to %s: %s", r.Method, r.RequestURI, err) return diff --git a/comp/core/secrets/impl/check_rights_windows.go b/comp/core/secrets/impl/check_rights_windows.go index 09c469e111b375..cfdad629ebd775 100644 --- a/comp/core/secrets/impl/check_rights_windows.go +++ 
b/comp/core/secrets/impl/check_rights_windows.go @@ -8,6 +8,7 @@ package secretsimpl import ( + "errors" "fmt" "os" "unsafe" @@ -24,7 +25,7 @@ func checkRights(filename string, allowGroupExec bool) error { // this function ignore `allowGroupExec` since it was design for the cluster-agent, // but the cluster-agent is not delivered for windows. if allowGroupExec { - return fmt.Errorf("the option 'allowGroupExec=true' is not allowed on windows") + return errors.New("the option 'allowGroupExec=true' is not allowed on windows") } if _, err := os.Stat(filename); err != nil { if os.IsNotExist(err) { diff --git a/comp/core/secrets/impl/fetch_secret.go b/comp/core/secrets/impl/fetch_secret.go index 99f5d2d211f47c..c56176582be47d 100644 --- a/comp/core/secrets/impl/fetch_secret.go +++ b/comp/core/secrets/impl/fetch_secret.go @@ -112,7 +112,7 @@ func (r *secretResolver) fetchSecretBackendVersion() (string, error) { // Only get version when secret_backend_type is used if r.backendType == "" { - return "", fmt.Errorf("version only supported when secret_backend_type is configured") + return "", errors.New("version only supported when secret_backend_type is configured") } ctx, cancel := context.WithTimeout(context.Background(), @@ -149,7 +149,7 @@ func (r *secretResolver) fetchSecretBackendVersion() (string, error) { if err != nil { log.Debugf("secret_backend_command --version stderr: %s", stderr.buf.String()) if ctx.Err() == context.DeadlineExceeded { - return "", fmt.Errorf("version command timeout") + return "", errors.New("version command timeout") } return "", fmt.Errorf("version command failed: %w", err) } diff --git a/comp/core/secrets/impl/fetch_secret_test.go b/comp/core/secrets/impl/fetch_secret_test.go index e29456549a73f6..14bc590bd6a66d 100644 --- a/comp/core/secrets/impl/fetch_secret_test.go +++ b/comp/core/secrets/impl/fetch_secret_test.go @@ -8,6 +8,7 @@ package secretsimpl import ( "bytes" "context" + "errors" "fmt" "maps" "os" @@ -38,7 +39,7 @@ func build(t 
*testing.T, outTarget string) { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // Append to the command's env vars, which prevents them from affecting other tests - cmd.Env = append(cmd.Env, []string{"GOPROXY=off", "GOPRIVATE=*", fmt.Sprintf("GOCACHE=%s", cacheDir)}...) + cmd.Env = append(cmd.Env, []string{"GOPROXY=off", "GOPRIVATE=*", "GOCACHE=" + cacheDir}...) err := cmd.Run() if err != nil { t.Fatalf("Could not compile secret backend binary: %s", err) @@ -189,7 +190,7 @@ func TestExecCommandError(t *testing.T) { func TestFetchSecretExecError(t *testing.T) { tel := nooptelemetry.GetCompatComponent() resolver := newEnabledSecretResolver(tel) - resolver.commandHookFunc = func(string) ([]byte, error) { return nil, fmt.Errorf("some error") } + resolver.commandHookFunc = func(string) ([]byte, error) { return nil, errors.New("some error") } _, err := resolver.fetchSecret([]string{"handle1", "handle2"}) assert.NotNil(t, err) } diff --git a/comp/core/secrets/impl/secrets_test.go b/comp/core/secrets/impl/secrets_test.go index c857970575fed5..d169d7df599cdf 100644 --- a/comp/core/secrets/impl/secrets_test.go +++ b/comp/core/secrets/impl/secrets_test.go @@ -6,6 +6,7 @@ package secretsimpl import ( + "errors" "fmt" "math/rand" "os" @@ -243,7 +244,7 @@ func TestResolveNoCommand(t *testing.T) { tel := nooptelemetry.GetCompatComponent() resolver := newEnabledSecretResolver(tel) resolver.fetchHookFunc = func([]string) (map[string]string, error) { - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") } // since we didn't set any command this should return without any error @@ -258,7 +259,7 @@ func TestResolveSecretError(t *testing.T) { resolver.backendCommand = "some_command" resolver.fetchHookFunc = func([]string) (map[string]string, error) { - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") } _, err := resolver.Resolve(testConf, "test", "", "") diff --git a/comp/core/secrets/impl/status_test.go 
b/comp/core/secrets/impl/status_test.go index f6172e80d6b993..82510276dc1b61 100644 --- a/comp/core/secrets/impl/status_test.go +++ b/comp/core/secrets/impl/status_test.go @@ -7,7 +7,7 @@ package secretsimpl import ( "bytes" - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -118,7 +118,7 @@ func TestSecretStatusWithPermissions(t *testing.T) { require.Contains(t, stats, "executablePermissions") assert.Equal(t, "OK, the executable has the correct permissions", stats["executablePermissions"]) - checkRightsFunc = func(_ string, _ bool) error { return fmt.Errorf("some error") } + checkRightsFunc = func(_ string, _ bool) error { return errors.New("some error") } stats = make(map[string]interface{}) err = resolver.JSON(false, stats) diff --git a/comp/core/secrets/utils/utils.go b/comp/core/secrets/utils/utils.go index 4d1de3780a0777..7a25d887fd313c 100644 --- a/comp/core/secrets/utils/utils.go +++ b/comp/core/secrets/utils/utils.go @@ -8,7 +8,7 @@ package utils import ( - "fmt" + "errors" "slices" "strconv" "strings" @@ -106,6 +106,6 @@ func (w *Walker) Walk(data *interface{}) error { case []interface{}: return w.slice(v, nil) default: - return fmt.Errorf("given data is not of expected type map not slice") + return errors.New("given data is not of expected type map not slice") } } diff --git a/comp/core/secrets/utils/walker_test.go b/comp/core/secrets/utils/walker_test.go index 468fe1ff75d628..7e223598688fc9 100644 --- a/comp/core/secrets/utils/walker_test.go +++ b/comp/core/secrets/utils/walker_test.go @@ -6,7 +6,7 @@ package utils import ( - "fmt" + "errors" "sort" "testing" @@ -52,7 +52,7 @@ func TestWalkerError(t *testing.T) { w := Walker{ Resolver: func([]string, string) (string, error) { - return "", fmt.Errorf("some error") + return "", errors.New("some error") }, } diff --git a/comp/core/settings/settingsimpl/settingsimpl_test.go b/comp/core/settings/settingsimpl/settingsimpl_test.go index 8690e03866a8ee..adba8f984b2d6f 100644 --- 
a/comp/core/settings/settingsimpl/settingsimpl_test.go +++ b/comp/core/settings/settingsimpl/settingsimpl_test.go @@ -338,7 +338,7 @@ func TestRuntimeSettings(t *testing.T) { ts := httptest.NewServer(router) defer ts.Close() - requestBody := fmt.Sprintf("value=%s", html.EscapeString("fancy")) + requestBody := "value=" + html.EscapeString("fancy") request, err := http.NewRequest("POST", ts.URL+"/config/foo", bytes.NewBuffer([]byte(requestBody))) require.NoError(t, err) request.Header.Set("Content-Type", "application/x-www-form-urlencoded") diff --git a/comp/core/status/statusimpl/status_test.go b/comp/core/status/statusimpl/status_test.go index f5707414648e6d..3f368f58ac3811 100644 --- a/comp/core/status/statusimpl/status_test.go +++ b/comp/core/status/statusimpl/status_test.go @@ -7,6 +7,7 @@ package statusimpl import ( "encoding/json" + "errors" "fmt" "io" "maps" @@ -48,7 +49,7 @@ func (m mockProvider) Section() string { func (m mockProvider) JSON(_ bool, stats map[string]interface{}) error { if m.returnError { - return fmt.Errorf("JSON error") + return errors.New("JSON error") } maps.Copy(stats, m.data) @@ -58,7 +59,7 @@ func (m mockProvider) JSON(_ bool, stats map[string]interface{}) error { func (m mockProvider) Text(_ bool, buffer io.Writer) error { if m.returnError { - return fmt.Errorf("Text error") + return errors.New("Text error") } _, err := buffer.Write([]byte(m.text)) @@ -67,7 +68,7 @@ func (m mockProvider) Text(_ bool, buffer io.Writer) error { func (m mockProvider) HTML(_ bool, buffer io.Writer) error { if m.returnError { - return fmt.Errorf("HTML error") + return errors.New("HTML error") } _, err := buffer.Write([]byte(m.html)) @@ -93,7 +94,7 @@ func (m mockHeaderProvider) Name() string { func (m mockHeaderProvider) JSON(_ bool, stats map[string]interface{}) error { if m.returnError { - return fmt.Errorf("JSON error") + return errors.New("JSON error") } maps.Copy(stats, m.data) @@ -103,7 +104,7 @@ func (m mockHeaderProvider) JSON(_ bool, stats 
map[string]interface{}) error { func (m mockHeaderProvider) Text(_ bool, buffer io.Writer) error { if m.returnError { - return fmt.Errorf("Text error") + return errors.New("Text error") } _, err := buffer.Write([]byte(m.text)) @@ -112,7 +113,7 @@ func (m mockHeaderProvider) Text(_ bool, buffer io.Writer) error { func (m mockHeaderProvider) HTML(_ bool, buffer io.Writer) error { if m.returnError { - return fmt.Errorf("HTML error") + return errors.New("HTML error") } _, err := buffer.Write([]byte(m.html)) diff --git a/comp/core/tagger/collectors/workloadmeta_test.go b/comp/core/tagger/collectors/workloadmeta_test.go index 60230354a604ec..35300c445e462d 100644 --- a/comp/core/tagger/collectors/workloadmeta_test.go +++ b/comp/core/tagger/collectors/workloadmeta_test.go @@ -48,9 +48,9 @@ func TestHandleKubePod(t *testing.T) { ) standardTags := []string{ - fmt.Sprintf("env:%s", env), - fmt.Sprintf("service:%s", svc), - fmt.Sprintf("version:%s", version), + "env:" + env, + "service:" + svc, + "version:" + version, } podEntityID := workloadmeta.EntityID{ @@ -232,15 +232,15 @@ func TestHandleKubePod(t *testing.T) { "gitcommit:foobar", }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:datadog-agent", }, LowCardTags: append([]string{ - fmt.Sprintf("kube_app_instance:%s", podName), - fmt.Sprintf("kube_app_name:%s", svc), - fmt.Sprintf("kube_app_version:%s", version), - fmt.Sprintf("kube_deployment:%s", svc), - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_app_instance:" + podName, + "kube_app_name:" + svc, + "kube_app_version:" + version, + "kube_deployment:" + svc, + "kube_namespace:" + podNamespace, "component:agent", "kube_app_component:agent", "kube_app_managed_by:helm", @@ -295,10 +295,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: 
[]string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:statefulset", "persistentvolumeclaim:pvc-0", }, @@ -308,15 +308,15 @@ func TestHandleKubePod(t *testing.T) { Source: podSource, EntityID: noEnvContainerTaggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_id:%s", noEnvContainerID), + "container_id:" + noEnvContainerID, fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, "image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", "image_name:datadog/agent", "image_tag:latest", @@ -350,10 +350,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, }, StandardTags: []string{}, }, @@ -361,15 +361,15 @@ func TestHandleKubePod(t *testing.T) { Source: podSource, EntityID: fullyFleshedContainerTaggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_id:%s", fullyFleshedContainerID), + "container_id:" + fullyFleshedContainerID, fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: append([]string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, 
"image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", "image_name:datadog/agent", "image_tag:latest", @@ -401,10 +401,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, }, StandardTags: []string{}, }, @@ -412,15 +412,15 @@ func TestHandleKubePod(t *testing.T) { Source: podSource, EntityID: types.NewEntityID(types.ContainerID, otelEnvContainerID), HighCardTags: []string{ - fmt.Sprintf("container_id:%s", otelEnvContainerID), + "container_id:" + otelEnvContainerID, fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: append([]string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, "image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", "image_name:datadog/agent", "image_tag:latest", @@ -456,10 +456,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, }, StandardTags: []string{}, }, @@ -467,15 +467,15 @@ func TestHandleKubePod(t *testing.T) { Source: podSource, EntityID: noEnvContainerTaggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_id:%s", noEnvContainerID), + "container_id:" + noEnvContainerID, fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), }, OrchestratorCardTags: 
[]string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: append([]string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, }, standardTags...), StandardTags: standardTags, }, @@ -501,11 +501,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "oshift_deployment:gitlab-ce-1", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "oshift_deployment_config:gitlab-ce", }, StandardTags: []string{}, @@ -531,10 +531,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "dd_remote_config_id:id", "dd_remote_config_rev:123", }, @@ -563,11 +563,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:owner_name", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:daemonset", "kube_daemon_set:owner_name", }, @@ -596,11 +596,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:owner_name", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:replicationcontroller", 
"kube_replication_controller:owner_name", }, @@ -632,11 +632,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:owner_name", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:statefulset", "kube_stateful_set:owner_name", "persistentvolumeclaim:pvc-0", @@ -666,11 +666,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:owner_name", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:job", "kube_job:owner_name", }, @@ -703,12 +703,12 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:some_cronjob-123", "kube_job:some_cronjob-123", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:job", "kube_cronjob:some_cronjob", }, @@ -737,11 +737,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, "kube_ownerref_name:owner_name", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:replicaset", "kube_replica_set:owner_name", }, @@ -779,11 +779,11 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, 
"kube_ownerref_name:some_deployment-bcd2", }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:replicaset", "kube_replica_set:some_deployment-bcd2", "kube_deployment:some_deployment", @@ -811,10 +811,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "eks_fargate_node:foobar", }, StandardTags: []string{}, @@ -839,10 +839,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_autoscaler_kind:datadogpodautoscaler", }, StandardTags: []string{}, @@ -869,10 +869,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, }, StandardTags: []string{}, }, @@ -901,10 +901,10 @@ func TestHandleKubePod(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "gpu_vendor:nvidia", }, StandardTags: []string{}, @@ -913,15 +913,15 @@ func TestHandleKubePod(t *testing.T) { Source: podSource, EntityID: fullyFleshedContainerTaggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_id:%s", fullyFleshedContainerID), + "container_id:" 
+ fullyFleshedContainerID, fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: append([]string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, "image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", "image_name:datadog/agent", "image_tag:latest", @@ -1034,10 +1034,10 @@ func TestHandleKubePodWithoutPvcAsTags(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, "kube_ownerref_kind:statefulset", }, StandardTags: []string{}, @@ -1046,15 +1046,15 @@ func TestHandleKubePodWithoutPvcAsTags(t *testing.T) { Source: podSource, EntityID: types.NewEntityID(types.ContainerID, noEnvContainerID), HighCardTags: []string{ - fmt.Sprintf("container_id:%s", noEnvContainerID), + "container_id:" + noEnvContainerID, fmt.Sprintf("display_container_name:%s_%s", runtimeContainerName, podName), }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, "image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", "image_name:datadog/agent", "image_tag:latest", @@ -1098,9 +1098,9 @@ func TestHandleKubePodNoContainerName(t *testing.T) { ) standardTags := []string{ - fmt.Sprintf("env:%s", env), - fmt.Sprintf("service:%s", svc), - fmt.Sprintf("version:%s", version), + "env:" + 
env, + "service:" + svc, + "version:" + version, } podEntityID := workloadmeta.EntityID{ @@ -1183,10 +1183,10 @@ func TestHandleKubePodNoContainerName(t *testing.T) { EntityID: podTaggerEntityID, HighCardTags: []string{}, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: []string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), + "kube_namespace:" + podNamespace, }, StandardTags: []string{}, }, @@ -1194,15 +1194,15 @@ func TestHandleKubePodNoContainerName(t *testing.T) { Source: podSource, EntityID: fullyFleshedContainerTaggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_id:%s", fullyFleshedContainerID), + "container_id:" + fullyFleshedContainerID, fmt.Sprintf("display_container_name:%s_%s", containerName, podName), }, OrchestratorCardTags: []string{ - fmt.Sprintf("pod_name:%s", podName), + "pod_name:" + podName, }, LowCardTags: append([]string{ - fmt.Sprintf("kube_namespace:%s", podNamespace), - fmt.Sprintf("kube_container_name:%s", containerName), + "kube_namespace:" + podNamespace, + "kube_container_name:" + containerName, "image_id:datadog/agent@sha256:a63d3f66fb2f69d955d4f2ca0b229385537a77872ffc04290acae65aed5317d2", "image_name:datadog/agent", "image_tag:latest", @@ -1235,7 +1235,7 @@ func TestHandleKubeMetadata(t *testing.T) { kubeMetadataEntityID := workloadmeta.EntityID{ Kind: workloadmeta.KindKubernetesMetadata, - ID: fmt.Sprintf("namespaces//%s", namespace), + ID: "namespaces//" + namespace, } store := fxutil.Test[workloadmetamock.Mock](t, fx.Options( @@ -1248,7 +1248,7 @@ func TestHandleKubeMetadata(t *testing.T) { store.Set(&workloadmeta.Container{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindKubernetesMetadata, - ID: fmt.Sprintf("namespaces//%s", namespace), + ID: "namespaces//" + namespace, }, EntityMeta: workloadmeta.EntityMeta{ Name: namespace, @@ -1697,9 +1697,9 @@ func TestHandleContainer(t *testing.T) { ) standardTags := []string{ - fmt.Sprintf("env:%s", 
env), - fmt.Sprintf("service:%s", svc), - fmt.Sprintf("version:%s", version), + "env:" + env, + "service:" + svc, + "version:" + version, } entityID := workloadmeta.EntityID{ @@ -1743,8 +1743,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: append([]string{ @@ -1788,14 +1788,14 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: append([]string{ "owner_team:container-integrations", - fmt.Sprintf("git.repository_url:%s", repositoryURL), - fmt.Sprintf("git.commit.sha:%s", commitSHA), + "git.repository_url:" + repositoryURL, + "git.commit.sha:" + commitSHA, }, standardTags...), StandardTags: standardTags, }, @@ -1826,8 +1826,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: append([]string{ @@ -1862,8 +1862,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: append([]string{ @@ -1898,8 +1898,8 @@ func TestHandleContainer(t *testing.T) { 
Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: append([]string{ @@ -1922,7 +1922,7 @@ func TestHandleContainer(t *testing.T) { "TIER": "node", // otel standard tags - "OTEL_RESOURCE_ATTRIBUTES": fmt.Sprintf("service.name=, = , =%s", env), + "OTEL_RESOURCE_ATTRIBUTES": "service.name=, = , =" + env, }, }, envAsTags: map[string]string{ @@ -1933,8 +1933,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -1968,8 +1968,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, "app_name:datadog-agent", }, OrchestratorCardTags: []string{}, @@ -2005,8 +2005,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2039,8 +2039,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, 
OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2072,8 +2072,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2106,8 +2106,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{ "mesos_task:system_dd-agent.dcc75b42-4b87-11e7-9a62-70b3d5800001", @@ -2150,8 +2150,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, "rancher_container:testAD-redis-1", }, OrchestratorCardTags: []string{}, @@ -2185,8 +2185,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2214,8 +2214,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2242,8 +2242,8 @@ func TestHandleContainer(t 
*testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2269,8 +2269,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2297,8 +2297,8 @@ func TestHandleContainer(t *testing.T) { Source: containerSource, EntityID: taggerEntityID, HighCardTags: []string{ - fmt.Sprintf("container_name:%s", containerName), - fmt.Sprintf("container_id:%s", entityID.ID), + "container_name:" + containerName, + "container_id:" + entityID.ID, }, OrchestratorCardTags: []string{}, LowCardTags: []string{ @@ -2884,16 +2884,16 @@ func TestHandleProcess(t *testing.T) { Source: processSource, EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, }, }, @@ -2917,14 +2917,14 @@ func TestHandleProcess(t *testing.T) { Source: processSource, EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "service:" + 
serviceNameFromDD, + "version:" + versionFromDD, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, }, }, @@ -2983,17 +2983,17 @@ func TestHandleProcess(t *testing.T) { EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ "entrypoint.name:com.example.Main", - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, "service.type:tomcat", - fmt.Sprintf("version:%s", versionFromDD), + "version:" + versionFromDD, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, }, }, @@ -3052,14 +3052,14 @@ func TestHandleProcess(t *testing.T) { "entrypoint.name:com.myapp.Server1", "entrypoint.name:com.myapp.Server2", "entrypoint.workdir:myapp", - fmt.Sprintf("service:%s", serviceNameFromDD), + "service:" + serviceNameFromDD, "service.runtime:openjdk-17", "service.type:web-server", }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("service:%s", serviceNameFromDD), + "service:" + serviceNameFromDD, }, }, }, @@ -3087,13 +3087,13 @@ func TestHandleProcess(t *testing.T) { EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ "entrypoint.name:my.package.Main", - fmt.Sprintf("service:%s", serviceNameFromDD), + "service:" + serviceNameFromDD, "service.framework:nodejs", }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("service:%s", serviceNameFromDD), + "service:" + serviceNameFromDD, }, }, }, @@ -3140,7 +3140,7 @@ func 
TestHandleProcess(t *testing.T) { Source: processSource, EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), + "env:" + envFromDD, "framework:express", "runtime:nodejs", "tracer_service_env:dev", @@ -3149,15 +3149,15 @@ func TestHandleProcess(t *testing.T) { "tracer_service_name:second-tracer-service", "tracer_service_version:1.0.0", "tracer_service_version:2.0.0", - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, }, }, @@ -3180,11 +3180,11 @@ func TestHandleProcess(t *testing.T) { Source: processSource, EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ - fmt.Sprintf("gpu_device:%s", strings.ToLower(strings.ReplaceAll(gpuDevice, " ", "_"))), - fmt.Sprintf("gpu_driver_version:%s", gpuDriverVersion), - fmt.Sprintf("gpu_uuid:%s", strings.ToLower(gpuUUID)), - fmt.Sprintf("gpu_vendor:%s", strings.ToLower(gpuVendor)), - fmt.Sprintf("gpu_virtualization_mode:%s", gpuVirtMode), + "gpu_device:" + strings.ToLower(strings.ReplaceAll(gpuDevice, " ", "_")), + "gpu_driver_version:" + gpuDriverVersion, + "gpu_uuid:" + strings.ToLower(gpuUUID), + "gpu_vendor:" + strings.ToLower(gpuVendor), + "gpu_virtualization_mode:" + gpuVirtMode, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, @@ -3217,21 +3217,21 @@ func TestHandleProcess(t *testing.T) { Source: processSource, EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("gpu_device:%s", strings.ToLower(strings.ReplaceAll(gpuDevice, " ", "_"))), - 
fmt.Sprintf("gpu_driver_version:%s", gpuDriverVersion), - fmt.Sprintf("gpu_uuid:%s", strings.ToLower(gpuUUID)), - fmt.Sprintf("gpu_vendor:%s", strings.ToLower(gpuVendor)), - fmt.Sprintf("gpu_virtualization_mode:%s", gpuVirtMode), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "gpu_device:" + strings.ToLower(strings.ReplaceAll(gpuDevice, " ", "_")), + "gpu_driver_version:" + gpuDriverVersion, + "gpu_uuid:" + strings.ToLower(gpuUUID), + "gpu_vendor:" + strings.ToLower(gpuVendor), + "gpu_virtualization_mode:" + gpuVirtMode, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, }, }, @@ -3261,16 +3261,16 @@ func TestHandleProcess(t *testing.T) { Source: processSource, EntityID: types.NewEntityID(types.Process, pid), LowCardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, OrchestratorCardTags: []string{}, HighCardTags: []string{}, StandardTags: []string{ - fmt.Sprintf("env:%s", envFromDD), - fmt.Sprintf("service:%s", serviceNameFromDD), - fmt.Sprintf("version:%s", versionFromDD), + "env:" + envFromDD, + "service:" + serviceNameFromDD, + "version:" + versionFromDD, }, }, }, diff --git a/comp/core/tagger/impl-remote/remote.go b/comp/core/tagger/impl-remote/remote.go index 2b69265df23093..677956c5daf792 100644 --- a/comp/core/tagger/impl-remote/remote.go +++ b/comp/core/tagger/impl-remote/remote.go @@ -209,7 +209,7 @@ func getOverridedAuthToken(ctx context.Context, log log.Component, cfg config.Co select { case 
<-ctx.Done(): - return "", fmt.Errorf("unable to read the artifact in the given time") + return "", errors.New("unable to read the artifact in the given time") case <-time.After(time.Second): // waiting 1 second before retrying } @@ -338,7 +338,7 @@ func (t *remoteTagger) queryContainerIDFromOriginInfo(originInfo origindetection // Create the context with the auth token queryCtx, queryCancel := context.WithTimeout( metadata.NewOutgoingContext(t.ctx, metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", t.authToken)}, // TODO IPC: implement GRPC client + "authorization": []string{"Bearer " + t.authToken}, // TODO IPC: implement GRPC client }), 1*time.Second, ) @@ -398,7 +398,7 @@ func (t *remoteTagger) Standard(entityID types.EntityID) ([]string, error) { func (t *remoteTagger) GetEntity(entityID types.EntityID) (*types.Entity, error) { entity := t.store.getEntity(entityID) if entity == nil { - return nil, fmt.Errorf("Entity not found for entityID") + return nil, errors.New("Entity not found for entityID") } return entity, nil @@ -597,7 +597,7 @@ func (t *remoteTagger) startTaggerStream(maxElapsed time.Duration) error { t.streamCtx, t.streamCancel = context.WithCancel( metadata.NewOutgoingContext(t.ctx, metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", t.authToken)}, // TODO IPC: implement GRPC client + "authorization": []string{"Bearer " + t.authToken}, // TODO IPC: implement GRPC client }), ) diff --git a/comp/core/tagger/impl-remote/remote_test.go b/comp/core/tagger/impl-remote/remote_test.go index 3033889d04a604..a4df2bad56be71 100644 --- a/comp/core/tagger/impl-remote/remote_test.go +++ b/comp/core/tagger/impl-remote/remote_test.go @@ -8,7 +8,7 @@ package remoteimpl import ( "crypto/tls" "encoding/json" - "fmt" + "errors" "net/http" "net/http/httptest" "os" @@ -160,7 +160,7 @@ func
TestNewComponentWithOverride(t *testing.T) { }), tagger.WithOverrideAuthTokenGetter(func(_ configmodel.Reader) (string, error) { - return "", fmt.Errorf("auth token getter always fails") + return "", errors.New("auth token getter always fails") })), Telemetry: nooptelemetry.GetCompatComponent(), IPC: ipcComp, diff --git a/comp/core/tagger/impl/tagger.go b/comp/core/tagger/impl/tagger.go index 45cff2f9317b04..bd694d6c5c5a71 100644 --- a/comp/core/tagger/impl/tagger.go +++ b/comp/core/tagger/impl/tagger.go @@ -15,6 +15,7 @@ package taggerimpl import ( "context" "encoding/json" + "errors" "fmt" "net/http" "sync" @@ -200,7 +201,7 @@ func (t *localTagger) getTags(entityID types.EntityID, cardinality types.TagCard } if entityID.Empty() { t.telemetryStore.QueriesByCardinality(cardinality).EmptyEntityID.Inc() - return tagset.HashedTags{}, fmt.Errorf("empty entity ID") + return tagset.HashedTags{}, errors.New("empty entity ID") } cachedTags := t.tagStore.LookupHashedWithEntityStr(entityID, cardinality) @@ -289,7 +290,7 @@ func (t *localTagger) GenerateContainerIDFromOriginInfo(originInfo origindetecti // It triggers a tagger fetch if the no tags are found func (t *localTagger) Standard(entityID types.EntityID) ([]string, error) { if entityID.Empty() { - return nil, fmt.Errorf("empty entity ID") + return nil, errors.New("empty entity ID") } return t.tagStore.LookupStandard(entityID) diff --git a/comp/core/tagger/impl/tagger_test.go b/comp/core/tagger/impl/tagger_test.go index aa65d8ffd8b38e..d8c9cbcd1f3c84 100644 --- a/comp/core/tagger/impl/tagger_test.go +++ b/comp/core/tagger/impl/tagger_test.go @@ -659,7 +659,7 @@ func TestEnrichTags(t *testing.T) { { name: "with local data (podUID, containerIDFromSocket) and high cardinality, APM origin", originInfo: taggertypes.OriginInfo{ - ContainerIDFromSocket: fmt.Sprintf("container_id://%s", containerID), + ContainerIDFromSocket: "container_id://" + containerID, LocalData: origindetection.LocalData{ PodUID: podUID, }, diff --git 
a/comp/core/tagger/server/server.go b/comp/core/tagger/server/server.go index f66b7ab72e9db0..791cc1c58dbbb3 100644 --- a/comp/core/tagger/server/server.go +++ b/comp/core/tagger/server/server.go @@ -8,7 +8,7 @@ package server import ( "context" - "fmt" + "errors" "time" "google.golang.org/grpc/codes" @@ -105,7 +105,7 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu if streamingID == "" { streamingID = uuid.New().String() } - subscriptionID := fmt.Sprintf("streaming-client-%s", streamingID) + subscriptionID := "streaming-client-" + streamingID // initBurst is a flag indicating if the initial sync is still in progress or not // true means the sync hasn't yet been finalised @@ -137,7 +137,7 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu case events, ok := <-subscription.EventsChan(): if !ok { log.Warnf("subscriber channel closed, client will reconnect") - return fmt.Errorf("subscriber channel closed") + return errors.New("subscriber channel closed") } ticker.Reset(streamKeepAliveInterval) diff --git a/comp/core/telemetry/options.go b/comp/core/telemetry/options.go index 9828879a052fb4..ed992990a6d1cc 100644 --- a/comp/core/telemetry/options.go +++ b/comp/core/telemetry/options.go @@ -5,8 +5,6 @@ package telemetry -import "fmt" - // Options for telemetry metrics. // Creating an Options struct without specifying any of its fields should be the // equivalent of using the DefaultOptions var. @@ -39,7 +37,7 @@ func (opts *Options) NameWithSeparator(subsystem, name string) string { // Prefix metrics with a _, prometheus will add a second _ // It will create metrics with a custom separator and // will let us replace it to a dot later in the process. 
- return fmt.Sprintf("_%s", name) + return "_" + name } return name diff --git a/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm_test.go b/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm_test.go index 0ae8b5205856a2..05d49cfafabf06 100644 --- a/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm_test.go +++ b/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm_test.go @@ -463,7 +463,7 @@ func TestPullNoAppNameWithoutDCA(t *testing.T) { container, err := workloadmetaStore.GetContainer(activeContainerWithoutProperties.Handle()) require.NoError(t, err) - assert.Contains(t, container.CollectorTags, fmt.Sprintf("container_name:%s", activeContainerWithoutProperties.Handle())) + assert.Contains(t, container.CollectorTags, "container_name:"+activeContainerWithoutProperties.Handle()) } func TestPullAppNameWithGardenPropertiesWithoutDCA(t *testing.T) { @@ -500,5 +500,5 @@ func TestPullAppNameWithGardenPropertiesWithoutDCA(t *testing.T) { container, err := workloadmetaStore.GetContainer(activeContainerWithProperties.Handle()) require.NoError(t, err) - assert.Contains(t, container.CollectorTags, fmt.Sprintf("container_name:%s", "app-name-1")) + assert.Contains(t, container.CollectorTags, "container_name:app-name-1") } diff --git a/comp/core/workloadmeta/collectors/internal/containerd/container_builder.go b/comp/core/workloadmeta/collectors/internal/containerd/container_builder.go index 2e1789c1686fdb..307aa00f0508d3 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/container_builder.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/container_builder.go @@ -34,7 +34,7 @@ const kataRuntimePrefix = "io.containerd.kata" // buildWorkloadMetaContainer generates a workloadmeta.Container from a containerd.Container func buildWorkloadMetaContainer(namespace string, container containerd.Container, containerdClient cutil.ContainerdItf, store workloadmeta.Component) (workloadmeta.Container, error)
{ if container == nil { - return workloadmeta.Container{}, fmt.Errorf("cannot build workloadmeta container from nil containerd container") + return workloadmeta.Container{}, errors.New("cannot build workloadmeta container from nil containerd container") } info, err := containerdClient.Info(namespace, container) diff --git a/comp/core/workloadmeta/collectors/internal/containerd/container_builder_test.go b/comp/core/workloadmeta/collectors/internal/containerd/container_builder_test.go index f686c059ca03e9..10846003c772be 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/container_builder_test.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/container_builder_test.go @@ -9,7 +9,7 @@ package containerd import ( "context" - "fmt" + "errors" "strings" "testing" "time" @@ -180,7 +180,7 @@ func TestBuildWorkloadMetaContainer(t *testing.T) { return image, nil }, mockTask: func() (containerd.Task, error) { - return nil, fmt.Errorf("no task found") + return nil, errors.New("no task found") }, }, expected: workloadmeta.Container{ diff --git a/comp/core/workloadmeta/collectors/internal/containerd/containerd.go b/comp/core/workloadmeta/collectors/internal/containerd/containerd.go index cd92ca2282ba69..38d005b64d5653 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/containerd.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/containerd.go @@ -366,13 +366,13 @@ func (c *collector) extractContainerFromEvent(ctx context.Context, containerdEve case containerCreationTopic, containerUpdateTopic, containerDeletionTopic: containerID, hasID = containerdEvent.Field([]string{"event", "id"}) if !hasID { - return "", nil, fmt.Errorf("missing ID in containerd event") + return "", nil, errors.New("missing ID in containerd event") } case TaskStartTopic, TaskOOMTopic, TaskPausedTopic, TaskResumedTopic, TaskExitTopic, TaskDeleteTopic: containerID, hasID = containerdEvent.Field([]string{"event", "container_id"}) if !hasID { - return "", 
nil, fmt.Errorf("missing ID in containerd event") + return "", nil, errors.New("missing ID in containerd event") } default: diff --git a/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go index c3df5c1894886f..ee8edb78d7c34b 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go @@ -33,7 +33,7 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { c.sbomScanner = scanner.GetGlobalScanner() if c.sbomScanner == nil { - return fmt.Errorf("error retrieving global SBOM scanner") + return errors.New("error retrieving global SBOM scanner") } filter := workloadmeta.NewFilterBuilder(). @@ -48,7 +48,7 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { ) scanner := collectors.GetContainerdScanner() if scanner == nil { - return fmt.Errorf("error retrieving global containerd scanner") + return errors.New("error retrieving global containerd scanner") } errs := c.filterSBOMContainers.GetErrors() @@ -58,7 +58,7 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { resultChan := scanner.Channel() if resultChan == nil { - return fmt.Errorf("error retrieving global containerd scanner channel") + return errors.New("error retrieving global containerd scanner channel") } go func() { for { diff --git a/comp/core/workloadmeta/collectors/internal/crio/crio_test.go b/comp/core/workloadmeta/collectors/internal/crio/crio_test.go index 28ee58e25af902..85d48e1c36a3bb 100644 --- a/comp/core/workloadmeta/collectors/internal/crio/crio_test.go +++ b/comp/core/workloadmeta/collectors/internal/crio/crio_test.go @@ -10,7 +10,6 @@ package crio import ( "context" "errors" - "fmt" "testing" "time" @@ -489,7 +488,7 @@ func TestGenerateImageEventFromContainer(t *testing.T) { { name: "Error retrieving image metadata", mockGetContainerImg: func(_ 
context.Context, _ *v1.ImageSpec, _ bool) (*v1.ImageStatusResponse, error) { - return nil, fmt.Errorf("failed to retrieve image metadata") + return nil, errors.New("failed to retrieve image metadata") }, container: &v1.Container{ Id: "container1", diff --git a/comp/core/workloadmeta/collectors/internal/crio/image.go b/comp/core/workloadmeta/collectors/internal/crio/image.go index 4739fa8059c7d1..1050674c61722d 100644 --- a/comp/core/workloadmeta/collectors/internal/crio/image.go +++ b/comp/core/workloadmeta/collectors/internal/crio/image.go @@ -10,6 +10,7 @@ package crio import ( "context" "encoding/json" + "errors" "fmt" "os" "strings" @@ -107,7 +108,7 @@ func generateUnsetImageEvent(seenID workloadmeta.EntityID) *workloadmeta.Collect // The backend requires the image ID to be set as the SHA to correctly associate the SBOM with the image. func parseDigests(imageRefs []string) (string, error) { if len(imageRefs) == 0 { - return "", fmt.Errorf("empty digests list") + return "", errors.New("empty digests list") } parts := strings.SplitN(imageRefs[0], "@", 2) if len(parts) < 2 { diff --git a/comp/core/workloadmeta/collectors/internal/crio/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/crio/image_sbom_trivy.go index d9e32c3050090d..0784f210cdf34e 100644 --- a/comp/core/workloadmeta/collectors/internal/crio/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/crio/image_sbom_trivy.go @@ -34,7 +34,7 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { } c.sbomScanner = scanner.GetGlobalScanner() if c.sbomScanner == nil { - return fmt.Errorf("global SBOM scanner not found") + return errors.New("global SBOM scanner not found") } filter := workloadmeta.NewFilterBuilder(). 
@@ -46,12 +46,12 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { scanner := collectors.GetCrioScanner() if scanner == nil { - return fmt.Errorf("failed to retrieve CRI-O SBOM scanner") + return errors.New("failed to retrieve CRI-O SBOM scanner") } resultChan := scanner.Channel() if resultChan == nil { - return fmt.Errorf("failed to retrieve scanner result channel") + return errors.New("failed to retrieve scanner result channel") } errs := c.sbomFilter.GetErrors() diff --git a/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go index 90107ec8ca0819..88d612a4ca6ade 100644 --- a/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go @@ -38,7 +38,7 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { c.sbomScanner = scanner.GetGlobalScanner() if c.sbomScanner == nil { - return fmt.Errorf("error retrieving global SBOM scanner") + return errors.New("error retrieving global SBOM scanner") } filter := workloadmeta.NewFilterBuilder(). 
@@ -54,11 +54,11 @@ func (c *collector) startSBOMCollection(ctx context.Context) error { scanner := collectors.GetDockerScanner() if scanner == nil { - return fmt.Errorf("error retrieving global docker scanner") + return errors.New("error retrieving global docker scanner") } resultChan := scanner.Channel() if resultChan == nil { - return fmt.Errorf("error retrieving global docker scanner channel") + return errors.New("error retrieving global docker scanner channel") } errs := c.filterSBOMContainers.GetErrors() diff --git a/comp/core/workloadmeta/collectors/internal/ecs/v4parser_test.go b/comp/core/workloadmeta/collectors/internal/ecs/v4parser_test.go index a8b94cdc58caba..5d71a0031ccb72 100644 --- a/comp/core/workloadmeta/collectors/internal/ecs/v4parser_test.go +++ b/comp/core/workloadmeta/collectors/internal/ecs/v4parser_test.go @@ -9,7 +9,6 @@ package ecs import ( "context" - "fmt" "net/http/httptest" "testing" "time" @@ -215,7 +214,7 @@ func getFakeWorkloadmetaStore(ecsAgentURL string) *fakeWorkloadmetaStore { // add delay to trigger timeout return &workloadmeta.Container{ EnvVars: map[string]string{ - v3or4.DefaultMetadataURIv4EnvVariable: fmt.Sprintf("%s/v4/1234-2", ecsAgentURL), + v3or4.DefaultMetadataURIv4EnvVariable: ecsAgentURL + "/v4/1234-2", }, }, nil } @@ -224,13 +223,13 @@ func getFakeWorkloadmetaStore(ecsAgentURL string) *fakeWorkloadmetaStore { // add delay to trigger timeout return &workloadmeta.Container{ EnvVars: map[string]string{ - v3or4.DefaultMetadataURIv4EnvVariable: fmt.Sprintf("%s/v4/1234-1", ecsAgentURL), + v3or4.DefaultMetadataURIv4EnvVariable: ecsAgentURL + "/v4/1234-1", }, }, nil } return &workloadmeta.Container{ EnvVars: map[string]string{ - v3or4.DefaultMetadataURIv4EnvVariable: fmt.Sprintf("%s/v4/undefined", ecsAgentURL), + v3or4.DefaultMetadataURIv4EnvVariable: ecsAgentURL + "/v4/undefined", }, }, nil }, diff --git a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go 
b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go index aab69c1fa2fa59..b976ae68cb46c7 100644 --- a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go +++ b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go @@ -10,6 +10,7 @@ package kubemetadata import ( "context" + stderrors "errors" "fmt" "strings" "time" @@ -355,7 +356,7 @@ func (c *collector) getNamespaceMetadata(ns string) (*clusteragent.Metadata, err } if !c.isDCAEnabled() { - return nil, fmt.Errorf("cluster agent should be enabled in order to allow fetching namespace metadata") + return nil, stderrors.New("cluster agent should be enabled in order to allow fetching namespace metadata") } return c.dcaClient.GetNamespaceMetadata(ns) } diff --git a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata_test.go b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata_test.go index 3a48510bb7311c..505381b00a32e9 100644 --- a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata_test.go +++ b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata_test.go @@ -10,7 +10,6 @@ package kubemetadata import ( "context" "errors" - "fmt" "reflect" "testing" "time" @@ -172,7 +171,7 @@ func TestKubeMetadataCollector_getMetadata(t *testing.T) { name: "clusterAgentEnabled not enable, APIserver return error", args: args{ getPodMetaDataFromAPIServerFunc: func(string, string, string) ([]string, error) { - return nil, fmt.Errorf("fake error") + return nil, errors.New("fake error") }, po: &kubelet.Pod{}, }, @@ -231,7 +230,7 @@ func TestKubeMetadataCollector_getMetadata(t *testing.T) { clusterAgentEnabled: true, dcaClient: &FakeDCAClient{ LocalVersion: version.Version{Major: 1, Minor: 2}, - KubernetesMetadataNamesErr: fmt.Errorf("fake error"), + KubernetesMetadataNamesErr: errors.New("fake error"), }, }, want: nil, diff --git a/comp/core/workloadmeta/collectors/internal/remote/generic.go 
b/comp/core/workloadmeta/collectors/internal/remote/generic.go index eb3b10839d870d..5c4cc50813f1a9 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/generic.go +++ b/comp/core/workloadmeta/collectors/internal/remote/generic.go @@ -150,7 +150,7 @@ func (c *GenericCollector) startWorkloadmetaStream(maxElapsed time.Duration) err c.ctx, metadata.MD{ "authorization": []string{ - fmt.Sprintf("Bearer %s", c.IPC.GetAuthToken()), // TODO IPC: Remove this raw usage of the auth token + "Bearer " + c.IPC.GetAuthToken(), // TODO IPC: Remove this raw usage of the auth token }, }, ), diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go index 8db75602a2b498..ca5420564e8430 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go @@ -11,7 +11,7 @@ package processcollector import ( "context" - "fmt" + "errors" "strconv" "time" @@ -164,7 +164,7 @@ func (s *streamHandler) HandleResponse(store workloadmeta.Component, resp interf log.Trace("handling response") response, ok := resp.(*pbgo.ProcessStreamResponse) if !ok { - return nil, fmt.Errorf("incorrect response type") + return nil, errors.New("incorrect response type") } collectorEvents := make([]workloadmeta.CollectorEvent, 0, len(response.SetEvents)+len(response.UnsetEvents)) diff --git a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go index dd8f42cc6a2421..b2e97f12c7e30f 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go +++ b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go @@ -8,7 +8,7 @@ package workloadmeta import ( "context" - "fmt" + "errors" "slices" 
"go.uber.org/fx" @@ -98,7 +98,7 @@ type streamHandler struct { // NewCollector returns a CollectorProvider to build a remote workloadmeta collector, and an error if any. func NewCollector(deps dependencies) (workloadmeta.CollectorProvider, error) { if filterHasUnsupportedKind(deps.Params.Filter) { - return workloadmeta.CollectorProvider{}, fmt.Errorf("the filter specified contains unsupported kinds") + return workloadmeta.CollectorProvider{}, errors.New("the filter specified contains unsupported kinds") } return workloadmeta.CollectorProvider{ @@ -147,7 +147,7 @@ func (s *streamHandler) IsEnabled() bool { func (s *streamHandler) HandleResponse(_ workloadmeta.Component, resp interface{}) ([]workloadmeta.CollectorEvent, error) { response, ok := resp.(*pb.WorkloadmetaStreamResponse) if !ok { - return nil, fmt.Errorf("incorrect response type") + return nil, errors.New("incorrect response type") } var collectorEvents []workloadmeta.CollectorEvent diff --git a/comp/core/workloadmeta/impl/flare_provider.go b/comp/core/workloadmeta/impl/flare_provider.go index a27f1847977862..3d1bb8d054533e 100644 --- a/comp/core/workloadmeta/impl/flare_provider.go +++ b/comp/core/workloadmeta/impl/flare_provider.go @@ -49,7 +49,7 @@ func (w *workloadmeta) sbomFlareProvider(fb flaretypes.FlareBuilder) error { } names[name]++ - _ = fb.AddFileWithoutScrubbing(filepath.Join("sbom", fmt.Sprintf("%s.json", name)), content) + _ = fb.AddFileWithoutScrubbing(filepath.Join("sbom", name+".json"), content) } return nil diff --git a/comp/core/workloadmeta/impl/store.go b/comp/core/workloadmeta/impl/store.go index cf8fceb0f88e0b..2f786272d88b1a 100644 --- a/comp/core/workloadmeta/impl/store.go +++ b/comp/core/workloadmeta/impl/store.go @@ -7,6 +7,7 @@ package workloadmetaimpl import ( "context" + "errors" "fmt" "slices" "sort" @@ -17,7 +18,7 @@ import ( wmdef "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/telemetry" - 
"github.com/DataDog/datadog-agent/pkg/errors" + pkgerrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -246,7 +247,7 @@ func (w *workloadmeta) GetKubernetesPodByName(podName, podNamespace string) (*wm } } - return nil, errors.NewNotFound(podName) + return nil, pkgerrors.NewNotFound(podName) } // ListKubernetesPods implements Store#ListKubernetesPods @@ -319,27 +320,27 @@ func (w *workloadmeta) GetContainerForProcess(processID string) (*wmdef.Containe processEntities, ok := w.store[wmdef.KindProcess] if !ok { - return nil, errors.NewNotFound(string(wmdef.KindProcess)) + return nil, pkgerrors.NewNotFound(string(wmdef.KindProcess)) } processEntity, ok := processEntities[processID] if !ok { - return nil, errors.NewNotFound(processID) + return nil, pkgerrors.NewNotFound(processID) } process := processEntity.cached.(*wmdef.Process) if process.Owner == nil || process.Owner.Kind != wmdef.KindContainer { - return nil, errors.NewNotFound(processID) + return nil, pkgerrors.NewNotFound(processID) } containerEntities, ok := w.store[wmdef.KindContainer] if !ok { - return nil, errors.NewNotFound(process.Owner.ID) + return nil, pkgerrors.NewNotFound(process.Owner.ID) } container, ok := containerEntities[process.Owner.ID] if !ok { - return nil, errors.NewNotFound(process.Owner.ID) + return nil, pkgerrors.NewNotFound(process.Owner.ID) } return container.cached.(*wmdef.Container), nil @@ -352,27 +353,27 @@ func (w *workloadmeta) GetKubernetesPodForContainer(containerID string) (*wmdef. 
containerEntities, ok := w.store[wmdef.KindContainer] if !ok { - return nil, errors.NewNotFound(containerID) + return nil, pkgerrors.NewNotFound(containerID) } containerEntity, ok := containerEntities[containerID] if !ok { - return nil, errors.NewNotFound(containerID) + return nil, pkgerrors.NewNotFound(containerID) } container := containerEntity.cached.(*wmdef.Container) if container.Owner == nil || container.Owner.Kind != wmdef.KindKubernetesPod { - return nil, errors.NewNotFound(containerID) + return nil, pkgerrors.NewNotFound(containerID) } podEntities, ok := w.store[wmdef.KindKubernetesPod] if !ok { - return nil, errors.NewNotFound(container.Owner.ID) + return nil, pkgerrors.NewNotFound(container.Owner.ID) } pod, ok := podEntities[container.Owner.ID] if !ok { - return nil, errors.NewNotFound(container.Owner.ID) + return nil, pkgerrors.NewNotFound(container.Owner.ID) } return pod.cached.(*wmdef.KubernetesPod), nil @@ -581,7 +582,7 @@ func (w *workloadmeta) IsInitialized() bool { func (w *workloadmeta) validatePushEvents(events []wmdef.Event) error { for _, event := range events { if event.Type != wmdef.EventTypeSet && event.Type != wmdef.EventTypeUnset { - return fmt.Errorf("unsupported Event type: only EventTypeSet and EventTypeUnset types are allowed for push events") + return errors.New("unsupported Event type: only EventTypeSet and EventTypeUnset types are allowed for push events") } } return nil @@ -629,7 +630,7 @@ func (w *workloadmeta) startCandidatesWithRetry(ctx context.Context) error { return nil } - return fmt.Errorf("some collectors failed to start. Will retry") + return errors.New("some collectors failed to start. 
Will retry") }, expBackoff) } @@ -869,12 +870,12 @@ func (w *workloadmeta) getEntityByKind(kind wmdef.Kind, id string) (wmdef.Entity entitiesOfKind, ok := w.store[kind] if !ok { - return nil, errors.NewNotFound(string(kind)) + return nil, pkgerrors.NewNotFound(string(kind)) } entity, ok := entitiesOfKind[id] if !ok { - return nil, errors.NewNotFound(id) + return nil, pkgerrors.NewNotFound(id) } return entity.cached, nil diff --git a/comp/core/workloadmeta/proto/proto.go b/comp/core/workloadmeta/proto/proto.go index 3c493e341b8439..24a63380df7257 100644 --- a/comp/core/workloadmeta/proto/proto.go +++ b/comp/core/workloadmeta/proto/proto.go @@ -7,6 +7,7 @@ package proto import ( + "errors" "fmt" "time" @@ -721,7 +722,7 @@ func WorkloadmetaEventFromProtoEvent(protoEvent *pb.WorkloadmetaEvent) (workload }, nil } - return workloadmeta.Event{}, fmt.Errorf("unknown entity") + return workloadmeta.Event{}, errors.New("unknown entity") } func toWorkloadmetaKind(protoKind pb.WorkloadmetaKind) (workloadmeta.Kind, error) { diff --git a/comp/dogstatsd/listeners/udp.go b/comp/dogstatsd/listeners/udp.go index e2f7f18b4858cf..39c1d9680c0b14 100644 --- a/comp/dogstatsd/listeners/udp.go +++ b/comp/dogstatsd/listeners/udp.go @@ -68,7 +68,7 @@ func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *pac if cfg.GetBool("dogstatsd_non_local_traffic") { // Listen to all network interfaces - url = fmt.Sprintf(":%s", port) + url = ":" + port } else { url = net.JoinHostPort(configutils.GetBindHost(cfg), port) } diff --git a/comp/dogstatsd/listeners/uds_linux.go b/comp/dogstatsd/listeners/uds_linux.go index 1abfbcd0a30d70..6db89117f59bc4 100644 --- a/comp/dogstatsd/listeners/uds_linux.go +++ b/comp/dogstatsd/listeners/uds_linux.go @@ -7,7 +7,6 @@ package listeners import ( "errors" - "fmt" "strconv" "syscall" "time" @@ -63,7 +62,7 @@ func processUDSOrigin(ancillary []byte, wmeta option.Option[workloadmeta.Compone return 0, packets.NoOrigin, err } if len(messages) == 0 { - 
return 0, packets.NoOrigin, fmt.Errorf("ancillary data empty") + return 0, packets.NoOrigin, errors.New("ancillary data empty") } cred, err := unix.ParseUnixCredentials(&messages[0]) if err != nil { @@ -71,7 +70,7 @@ func processUDSOrigin(ancillary []byte, wmeta option.Option[workloadmeta.Compone } if cred.Pid == 0 { - return 0, packets.NoOrigin, fmt.Errorf("matched PID for the process is 0, it belongs " + + return 0, packets.NoOrigin, errors.New("matched PID for the process is 0, it belongs " + "probably to another namespace. Is the agent in host PID mode?") } diff --git a/comp/dogstatsd/replay/impl/capture.go b/comp/dogstatsd/replay/impl/capture.go index cac424c81f4ac3..66b7dd0bda2363 100644 --- a/comp/dogstatsd/replay/impl/capture.go +++ b/comp/dogstatsd/replay/impl/capture.go @@ -8,7 +8,7 @@ package replayimpl import ( "context" - "fmt" + "errors" "path" "sync" "time" @@ -56,7 +56,7 @@ func NewTrafficCapture(deps Requires) replay.Component { func (tc *trafficCapture) configure(_ context.Context) error { writer := NewTrafficCaptureWriter(tc.config.GetInt("dogstatsd_capture_depth"), tc.tagger) if writer == nil { - tc.startUpError = fmt.Errorf("unable to instantiate capture writer") + tc.startUpError = errors.New("unable to instantiate capture writer") } tc.writer = writer @@ -78,7 +78,7 @@ func (tc *trafficCapture) IsOngoing() bool { // StartCapture starts a TrafficCapture and returns an error in the event of an issue. 
func (tc *trafficCapture) StartCapture(p string, d time.Duration, compressed bool) (string, error) { if tc.IsOngoing() { - return "", fmt.Errorf("Ongoing capture in progress") + return "", errors.New("Ongoing capture in progress") } target, path, err := OpenFile(afero.NewOsFs(), p, tc.defaultlocation()) diff --git a/comp/dogstatsd/replay/impl/file.go b/comp/dogstatsd/replay/impl/file.go index b2e8538ed79ffb..9af5fae083f82f 100644 --- a/comp/dogstatsd/replay/impl/file.go +++ b/comp/dogstatsd/replay/impl/file.go @@ -6,7 +6,7 @@ package replayimpl import ( - "fmt" + "errors" "io" "github.com/h2non/filetype" @@ -44,12 +44,12 @@ func datadogMatcher(buf []byte) bool { func fileVersion(buf []byte) (int, error) { if !datadogMatcher(buf) { - return -1, fmt.Errorf("Cannot verify file version bad buffer or invalid file") + return -1, errors.New("Cannot verify file version bad buffer or invalid file") } ver := int(0xF0 ^ buf[4]) if ver > int(datadogFileVersion) { - return -1, fmt.Errorf("Unsupported file version") + return -1, errors.New("Unsupported file version") } return ver, nil } diff --git a/comp/dogstatsd/replay/impl/file_common.go b/comp/dogstatsd/replay/impl/file_common.go index 9f7f8dee61ae1c..ec37af539f0433 100644 --- a/comp/dogstatsd/replay/impl/file_common.go +++ b/comp/dogstatsd/replay/impl/file_common.go @@ -5,14 +5,14 @@ package replayimpl -import "fmt" +import "errors" var ( // DATADOG0F1FF0000 in HEX (D474D060F1FF0000); (F0 | datadogFileVersion) for different file versions support // 00 to terminate header datadogHeader = []byte{0xD4, 0x74, 0xD0, 0x60, 0xF0, 0xFF, 0x00, 0x00} //nolint:revive // TODO(AML) Fix revive linter - ErrHeaderWrite = fmt.Errorf("capture file header could not be fully written to buffer") + ErrHeaderWrite = errors.New("capture file header could not be fully written to buffer") ) const ( diff --git a/comp/dogstatsd/replay/impl/writer.go b/comp/dogstatsd/replay/impl/writer.go index ad70e5b839fd65..4b0ae5b7bcc9e6 100644 --- 
a/comp/dogstatsd/replay/impl/writer.go +++ b/comp/dogstatsd/replay/impl/writer.go @@ -8,6 +8,7 @@ package replayimpl import ( "bufio" "encoding/binary" + "errors" "fmt" "io" "os" @@ -274,7 +275,7 @@ func (tc *TrafficCaptureWriter) Enqueue(msg *replay.CaptureBuffer) bool { // RegisterSharedPoolManager registers the shared pool manager with the TrafficCaptureWriter. func (tc *TrafficCaptureWriter) RegisterSharedPoolManager(p *packets.PoolManager[packets.Packet]) error { if tc.sharedPacketPoolManager != nil { - return fmt.Errorf("OOB Pool Manager already registered with the writer") + return errors.New("OOB Pool Manager already registered with the writer") } tc.sharedPacketPoolManager = p @@ -285,7 +286,7 @@ func (tc *TrafficCaptureWriter) RegisterSharedPoolManager(p *packets.PoolManager // RegisterOOBPoolManager registers the OOB shared pool manager with the TrafficCaptureWriter. func (tc *TrafficCaptureWriter) RegisterOOBPoolManager(p *packets.PoolManager[[]byte]) error { if tc.oobPacketPoolManager != nil { - return fmt.Errorf("OOB Pool Manager already registered with the writer") + return errors.New("OOB Pool Manager already registered with the writer") } tc.oobPacketPoolManager = p diff --git a/comp/dogstatsd/server/convert_bench_test.go b/comp/dogstatsd/server/convert_bench_test.go index 5105c3ab4f7c17..d9277afae36476 100644 --- a/comp/dogstatsd/server/convert_bench_test.go +++ b/comp/dogstatsd/server/convert_bench_test.go @@ -7,6 +7,7 @@ package server import ( "fmt" + "strings" "testing" "go.uber.org/fx" @@ -25,15 +26,17 @@ import ( ) func buildRawSample(tagCount int, multipleValues bool) []byte { - tags := "tag0:val0" + var builder strings.Builder + builder.WriteString("tag0:val0") for i := 1; i < tagCount; i++ { - tags += fmt.Sprintf(",tag%d:val%d", i, i) + fmt.Fprintf(&builder, ",tag%d:val%d", i, i) } + tags := builder.String() if multipleValues { - return []byte(fmt.Sprintf("daemon:666:777|h|@0.5|#%s", tags)) + return []byte("daemon:666:777|h|@0.5|#" + 
tags) } - return []byte(fmt.Sprintf("daemon:666|h|@0.5|#%s", tags)) + return []byte("daemon:666|h|@0.5|#" + tags) } // used to store the result and avoid optimizations diff --git a/comp/dogstatsd/server/enrich_test.go b/comp/dogstatsd/server/enrich_test.go index 3ec8a86277b449..4af8ddede315ea 100644 --- a/comp/dogstatsd/server/enrich_test.go +++ b/comp/dogstatsd/server/enrich_test.go @@ -6,6 +6,7 @@ package server import ( + "errors" "fmt" "testing" @@ -44,7 +45,7 @@ func parseAndEnrichSingleMetricMessage(t *testing.T, message []byte, conf enrich samples := []metrics.MetricSample{} samples = enrichMetricSample(samples, parsed, "", 0, "", conf, nil) if len(samples) != 1 { - return metrics.MetricSample{}, fmt.Errorf("wrong number of metrics parsed") + return metrics.MetricSample{}, errors.New("wrong number of metrics parsed") } return samples[0], nil } diff --git a/comp/dogstatsd/server/parse.go b/comp/dogstatsd/server/parse.go index e7b6b3b5123cf0..61429bb9f5c0c4 100644 --- a/comp/dogstatsd/server/parse.go +++ b/comp/dogstatsd/server/parse.go @@ -7,6 +7,7 @@ package server import ( "bytes" + "errors" "fmt" "strconv" "time" @@ -133,7 +134,7 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error // especially important here since all the unidentified garbage gets // identified as metrics if !hasMetricSampleFormat(message) { - return dogstatsdMetricSample{}, fmt.Errorf("invalid dogstatsd message format") + return dogstatsdMetricSample{}, errors.New("invalid dogstatsd message format") } rawNameAndValue, message := nextField(message) @@ -202,7 +203,7 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error return dogstatsdMetricSample{}, fmt.Errorf("could not parse dogstatsd timestamp %q: %v", optionalField[len(timestampFieldPrefix):], err) } if ts < 1 { - return dogstatsdMetricSample{}, fmt.Errorf("dogstatsd timestamp should be > 0") + return dogstatsdMetricSample{}, errors.New("dogstatsd timestamp should be > 0") 
} timestamp = time.Unix(ts, 0) // local data @@ -290,7 +291,7 @@ func (p *parser) parseFloat64List(rawFloats []byte) ([]float64, error) { } if len(values) == 0 { p.float64List.put(values) - return nil, fmt.Errorf("no value found") + return nil, errors.New("no value found") } return values, nil } diff --git a/comp/dogstatsd/server/parse_events.go b/comp/dogstatsd/server/parse_events.go index ba4c0fecfbc482..8e4622f8a452bf 100644 --- a/comp/dogstatsd/server/parse_events.go +++ b/comp/dogstatsd/server/parse_events.go @@ -105,7 +105,7 @@ func parseHeader(rawHeader []byte) (eventHeader, error) { // Ensure that title isn't empty if titleLength == 0 { - return eventHeader{}, fmt.Errorf("invalid event: empty title") + return eventHeader{}, errors.New("invalid event: empty title") } // Convert text length to workable type and do a basic validity check on value diff --git a/comp/dogstatsd/server/parse_service_checks.go b/comp/dogstatsd/server/parse_service_checks.go index 1c230d088e3739..5f17ec1346bfdc 100644 --- a/comp/dogstatsd/server/parse_service_checks.go +++ b/comp/dogstatsd/server/parse_service_checks.go @@ -7,6 +7,7 @@ package server import ( "bytes" + "errors" "fmt" "strconv" @@ -67,7 +68,7 @@ func hasServiceCheckFormat(message []byte) bool { func parseServiceCheckName(rawName []byte) ([]byte, error) { if len(rawName) == 0 { - return nil, fmt.Errorf("invalid dogstatsd service check name: empty name") + return nil, errors.New("invalid dogstatsd service check name: empty name") } return rawName, nil } @@ -117,7 +118,7 @@ func (p *parser) applyServiceCheckOptionalField(serviceCheck dogstatsdServiceChe func (p *parser) parseServiceCheck(message []byte) (dogstatsdServiceCheck, error) { if !hasServiceCheckFormat(message) { - return dogstatsdServiceCheck{}, fmt.Errorf("invalid dogstatsd service check format") + return dogstatsdServiceCheck{}, errors.New("invalid dogstatsd service check format") } // pop the _sc| header message = message[4:] diff --git 
a/comp/dogstatsd/server/server.go b/comp/dogstatsd/server/server.go index cd3afff72a41ed..7e9f649d1c72cd 100644 --- a/comp/dogstatsd/server/server.go +++ b/comp/dogstatsd/server/server.go @@ -8,6 +8,7 @@ package server import ( "bytes" "context" + "errors" "expvar" "fmt" "net" @@ -451,7 +452,7 @@ func (s *server) start(context.Context) error { } if len(tmpListeners) == 0 { - return fmt.Errorf("listening on neither udp nor socket, please check your configuration") + return errors.New("listening on neither udp nor socket, please check your configuration") } s.packetsIn = packetsChannel diff --git a/comp/dogstatsd/server/server_bench_test.go b/comp/dogstatsd/server/server_bench_test.go index 59efc8dc0958be..d1aca85e8b28bc 100644 --- a/comp/dogstatsd/server/server_bench_test.go +++ b/comp/dogstatsd/server/server_bench_test.go @@ -8,6 +8,7 @@ package server import ( + "strings" "testing" "time" @@ -19,16 +20,18 @@ import ( ) func buildPacketContent(numberOfMetrics int, nbValuePerMessage int) []byte { - values := "" + var valuesBuilder strings.Builder for i := 0; i < nbValuePerMessage; i++ { - values += ":666" + valuesBuilder.WriteString(":666") } - rawPacket := "daemon" + values + "|h|@0.5|#sometag1:somevalue1,sometag2:somevalue2" - packets := rawPacket + rawPacket := "daemon" + valuesBuilder.String() + "|h|@0.5|#sometag1:somevalue1,sometag2:somevalue2" + var packetsBuilder strings.Builder + packetsBuilder.WriteString(rawPacket) for i := 1; i < numberOfMetrics; i++ { - packets += "\n" + rawPacket + packetsBuilder.WriteString("\n") + packetsBuilder.WriteString(rawPacket) } - return []byte(packets) + return []byte(packetsBuilder.String()) } func benchParsePackets(b *testing.B, rawPacket []byte) { diff --git a/comp/dogstatsd/server/server_test.go b/comp/dogstatsd/server/server_test.go index f2153cf952fc78..15b7f4082319ed 100644 --- a/comp/dogstatsd/server/server_test.go +++ b/comp/dogstatsd/server/server_test.go @@ -9,6 +9,7 @@ package server import ( "fmt" + "strconv" 
"testing" "github.com/stretchr/testify/assert" @@ -74,7 +75,7 @@ func TestNoRaceOriginTagMaps(t *testing.T) { sync := make(chan struct{}) done := make(chan struct{}, N) for i := 0; i < N; i++ { - id := fmt.Sprintf("%d", i) + id := strconv.Itoa(i) go func() { defer func() { done <- struct{}{} }() <-sync diff --git a/comp/etw/impl/etwSession.go b/comp/etw/impl/etwSession.go index c6486c54cd0787..156714fce3f5cb 100644 --- a/comp/etw/impl/etwSession.go +++ b/comp/etw/impl/etwSession.go @@ -47,7 +47,7 @@ func (e *etwSession) ConfigureProvider(providerGUID windows.GUID, configurations func (e *etwSession) EnableProvider(providerGUID windows.GUID) error { if e.wellKnown { - return fmt.Errorf("cannot enable provider on well-known session") + return errors.New("cannot enable provider on well-known session") } if _, ok := e.providers[providerGUID]; !ok { // ConfigureProvider was not called prior, set the default configuration @@ -66,7 +66,7 @@ func (e *etwSession) EnableProvider(providerGUID windows.GUID) error { * allows you to send one or the other but not both. 
*/ if len(cfg.EnabledIDs) > 0 && len(cfg.DisabledIDs) > 0 { - return fmt.Errorf("cannot enable and disable the same provider at the same time") + return errors.New("cannot enable and disable the same provider at the same time") } var enabledFilters *C.USHORT var enabledFilterCount C.ULONG @@ -259,7 +259,7 @@ func createEtwSession(name string, f etw.SessionConfigurationFunc) (*etwSession, if f != nil { f(&s.sessionConfig) if s.sessionConfig.MaxBuffers != 0 && s.sessionConfig.MaxBuffers < s.sessionConfig.MinBuffers { - return nil, fmt.Errorf("max buffers must be greater than or equal to min buffers") + return nil, errors.New("max buffers must be greater than or equal to min buffers") } } @@ -300,7 +300,7 @@ func createWellKnownEtwSession(name string, f etw.SessionConfigurationFunc) (*et if f != nil { f(&s.sessionConfig) if s.sessionConfig.MaxBuffers != 0 && s.sessionConfig.MaxBuffers < s.sessionConfig.MinBuffers { - return nil, fmt.Errorf("max buffers must be greater than or equal to min buffers") + return nil, errors.New("max buffers must be greater than or equal to min buffers") } } diff --git a/comp/forwarder/defaultforwarder/default_forwarder.go b/comp/forwarder/defaultforwarder/default_forwarder.go index 1925e2dae63242..45772d92147181 100644 --- a/comp/forwarder/defaultforwarder/default_forwarder.go +++ b/comp/forwarder/defaultforwarder/default_forwarder.go @@ -6,6 +6,7 @@ package defaultforwarder import ( + "errors" "fmt" "maps" "net/http" @@ -441,7 +442,7 @@ func (f *DefaultForwarder) Start() error { defer f.m.Unlock() if f.internalState.Load() == Started { - return fmt.Errorf("the forwarder is already started") + return errors.New("the forwarder is already started") } for _, df := range f.domainForwarders { @@ -552,7 +553,7 @@ func (f *DefaultForwarder) createAdvancedHTTPTransactions(endpoint transaction.E t.Destination = payload.Destination auth.Authorize(t) t.Headers.Set(versionHTTPHeaderKey, version.AgentVersion) - t.Headers.Set(useragentHTTPHeaderKey, 
fmt.Sprintf("datadog-agent/%s", version.AgentVersion)) + t.Headers.Set(useragentHTTPHeaderKey, "datadog-agent/"+version.AgentVersion) if allowArbitraryTags { t.Headers.Set(arbitraryTagHTTPHeaderKey, "true") } @@ -574,7 +575,7 @@ func (f *DefaultForwarder) createAdvancedHTTPTransactions(endpoint transaction.E func (f *DefaultForwarder) sendHTTPTransactions(transactions []*transaction.HTTPTransaction) error { if f.internalState.Load() == Stopped { - return fmt.Errorf("the forwarder is not started") + return errors.New("the forwarder is not started") } f.retryQueueDurationCapacityMutex.Lock() @@ -793,7 +794,7 @@ func (f *DefaultForwarder) GetDomainResolvers() []pkgresolver.DomainResolver { // SubmitTransaction adds a transaction to the queue for sending. func (f *DefaultForwarder) SubmitTransaction(t *transaction.HTTPTransaction) error { t.Headers.Set(versionHTTPHeaderKey, version.AgentVersion) - t.Headers.Set(useragentHTTPHeaderKey, fmt.Sprintf("datadog-agent/%s", version.AgentVersion)) + t.Headers.Set(useragentHTTPHeaderKey, "datadog-agent/"+version.AgentVersion) if f.config.GetBool("allow_arbitrary_tags") { t.Headers.Set(arbitraryTagHTTPHeaderKey, "true") diff --git a/comp/forwarder/defaultforwarder/domain_forwarder.go b/comp/forwarder/defaultforwarder/domain_forwarder.go index 0cda180f57eb1a..5682488922b608 100644 --- a/comp/forwarder/defaultforwarder/domain_forwarder.go +++ b/comp/forwarder/defaultforwarder/domain_forwarder.go @@ -7,7 +7,7 @@ package defaultforwarder import ( "crypto/tls" - "fmt" + "errors" "net" "net/http" "sync" @@ -217,7 +217,7 @@ func (f *domainForwarder) Start() error { defer f.m.Unlock() if f.internalState == Started { - return fmt.Errorf("the forwarder is already started") + return errors.New("the forwarder is already started") } // reset internal state to purge transactions from past starts diff --git a/comp/forwarder/defaultforwarder/forwarder_health.go b/comp/forwarder/defaultforwarder/forwarder_health.go index 
bd69891791e227..5d6e70f035e94d 100644 --- a/comp/forwarder/defaultforwarder/forwarder_health.go +++ b/comp/forwarder/defaultforwarder/forwarder_health.go @@ -221,7 +221,7 @@ func (fh *forwarderHealth) setAPIKeyStatus(apiKey string, _ string, status *expv if len(apiKey) > 5 { apiKey = apiKey[len(apiKey)-5:] } - obfuscatedKey := fmt.Sprintf("API key ending with %s", apiKey) + obfuscatedKey := "API key ending with " + apiKey if status == &apiKeyRemove { apiKeyStatus.Delete(obfuscatedKey) apiKeyFailure.Delete(obfuscatedKey) @@ -255,7 +255,7 @@ func (fh *forwarderHealth) validateAPIKey(apiKey, domain string) (bool, error) { return false, err } - req.Header.Set(useragentHTTPHeaderKey, fmt.Sprintf("datadog-agent/%s", version.AgentVersion)) + req.Header.Set(useragentHTTPHeaderKey, "datadog-agent/"+version.AgentVersion) resp, err := client.Do(req) if err != nil { diff --git a/comp/forwarder/defaultforwarder/internal/retry/file_removal_policy.go b/comp/forwarder/defaultforwarder/internal/retry/file_removal_policy.go index 5fe6bdadf41cf0..07dfa925f7fede 100644 --- a/comp/forwarder/defaultforwarder/internal/retry/file_removal_policy.go +++ b/comp/forwarder/defaultforwarder/internal/retry/file_removal_policy.go @@ -7,7 +7,7 @@ package retry import ( "crypto/md5" - "fmt" + "encoding/hex" "io" "os" "path" @@ -113,7 +113,7 @@ func (p *FileRemovalPolicy) getFolderPathForDomain(domainName string) (string, e if _, err := io.WriteString(h, domainName); err != nil { return "", err } - folder := fmt.Sprintf("%x", h.Sum(nil)) + folder := hex.EncodeToString(h.Sum(nil)) return path.Join(p.rootPath, folder), nil } diff --git a/comp/forwarder/defaultforwarder/internal/retry/telemetry.go b/comp/forwarder/defaultforwarder/internal/retry/telemetry.go index f676c5bc75d8a1..6d093b9d3d4bfa 100644 --- a/comp/forwarder/defaultforwarder/internal/retry/telemetry.go +++ b/comp/forwarder/defaultforwarder/internal/retry/telemetry.go @@ -306,13 +306,13 @@ func (t onDiskRetryQueueTelemetry) 
addDeserializeTransactionsCount(count int) { func toCamelCase(s string) string { parts := strings.Split(s, "_") - var camelCase string + var builder strings.Builder for _, p := range parts { if p == "" { continue } - camelCase += strings.ToUpper(string(p[0])) - camelCase += string(p[1:]) + builder.WriteString(strings.ToUpper(string(p[0]))) + builder.WriteString(p[1:]) } - return camelCase + return builder.String() } diff --git a/comp/forwarder/defaultforwarder/resolver/domain_resolver.go b/comp/forwarder/defaultforwarder/resolver/domain_resolver.go index a512086255bc19..ef65ad4c8feab6 100644 --- a/comp/forwarder/defaultforwarder/resolver/domain_resolver.go +++ b/comp/forwarder/defaultforwarder/resolver/domain_resolver.go @@ -418,7 +418,7 @@ func (r *domainResolver) GetAuthorizers() (res []authHeader) { if r.IsLocal() { res = append(res, authHeader{ key: "Authorization", - value: fmt.Sprintf("Bearer %s", r.authToken), + value: "Bearer " + r.authToken, }) } else { for _, key := range r.GetAPIKeys() { diff --git a/comp/forwarder/defaultforwarder/worker_test.go b/comp/forwarder/defaultforwarder/worker_test.go index cff811f3b2c246..88448f5e4c0e0e 100644 --- a/comp/forwarder/defaultforwarder/worker_test.go +++ b/comp/forwarder/defaultforwarder/worker_test.go @@ -8,7 +8,7 @@ package defaultforwarder import ( - "fmt" + "errors" "net/http" "strconv" "sync" @@ -101,7 +101,7 @@ func TestWorkerRetry(t *testing.T) { w := NewWorker(mockConfig, log, secrets, highPrio, lowPrio, requeue, newBlockedEndpoints(mockConfig, log), &PointSuccessfullySentMock{}, NewSharedConnection(log, false, 1, mockConfig)) mock := newTestTransaction() - mock.On("Process", w.Client.GetClient()).Return(fmt.Errorf("some kind of error")).Times(1) + mock.On("Process", w.Client.GetClient()).Return(errors.New("some kind of error")).Times(1) mock.On("GetTarget").Return("error_url").Times(1) w.Start() @@ -220,7 +220,7 @@ func TestWorkerCancelsInFlight(t *testing.T) { Run(func(_args tmock.Arguments) { 
processedwg.Done() }). - Return(fmt.Errorf("Cancelled")).Times(1) + Return(errors.New("Cancelled")).Times(1) mockTransaction.On("GetTarget").Return("").Times(1) @@ -287,7 +287,7 @@ func TestWorkerCancelsWaitingTransactions(t *testing.T) { Run(func(_args tmock.Arguments) { processedwg.Done() }). - Return(fmt.Errorf("Cancelled")).Times(1) + Return(errors.New("Cancelled")).Times(1) } else { // The other transactions succeed. mockTransaction.On("Process", w.Client.GetClient()).Return(nil).Times(1) diff --git a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go index 435190a06ebe15..454473c9de8f44 100644 --- a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go +++ b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go @@ -8,6 +8,7 @@ package eventplatformimpl import ( "context" + "errors" "fmt" "strconv" "strings" @@ -335,7 +336,7 @@ func Diagnose() []diagnose.Diagnosis { } url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) - name := fmt.Sprintf("Connectivity to %s", url) + name := "Connectivity to " + url if err == nil { diagnoses = append(diagnoses, diagnose.Diagnosis{ Status: diagnose.DiagnosisSuccess, @@ -473,7 +474,7 @@ func newHTTPPassthroughPipeline( return nil, err } if !endpoints.UseHTTP { - return nil, fmt.Errorf("endpoints must be http") + return nil, errors.New("endpoints must be http") } // epforwarder pipelines apply their own defaults on top of the hardcoded logs defaults if endpoints.BatchMaxConcurrentSend <= 0 { diff --git a/comp/healthplatform/impl/health-platform.go b/comp/healthplatform/impl/health-platform.go index 7d32ab2067d8c0..d1d0334650e479 100644 --- a/comp/healthplatform/impl/health-platform.go +++ b/comp/healthplatform/impl/health-platform.go @@ -8,6 +8,7 @@ package healthplatformimpl import ( "context" + "errors" "fmt" "sync" "time" @@ -132,7 +133,7 @@ func (h *healthPlatformImpl) stop(_ context.Context) error 
{ // If report is nil, it clears any existing issue (issue resolution) func (h *healthPlatformImpl) ReportIssue(checkID string, checkName string, report *healthplatform.IssueReport) error { if checkID == "" { - return fmt.Errorf("check ID cannot be empty") + return errors.New("check ID cannot be empty") } // Get previous issue for state change detection @@ -144,7 +145,7 @@ func (h *healthPlatformImpl) ReportIssue(checkID string, checkName string, repor var newIssue *healthplatform.Issue if report != nil { if report.IssueID == "" { - return fmt.Errorf("issue ID cannot be empty") + return errors.New("issue ID cannot be empty") } // Build complete issue from the registry using the issue ID and context diff --git a/comp/logs/agent/agentimpl/agent_restart_test.go b/comp/logs/agent/agentimpl/agent_restart_test.go index 1aba7ad89a9d28..2e344777690aec 100644 --- a/comp/logs/agent/agentimpl/agent_restart_test.go +++ b/comp/logs/agent/agentimpl/agent_restart_test.go @@ -74,7 +74,7 @@ func (suite *RestartTestSuite) SetupTest() { suite.testDir = suite.T().TempDir() - suite.testLogFile = fmt.Sprintf("%s/test.log", suite.testDir) + suite.testLogFile = suite.testDir + "/test.log" fd, err := os.Create(suite.testLogFile) suite.NoError(err) @@ -277,7 +277,7 @@ func (suite *RestartTestSuite) TestRestart_FlushesAuditor() { // Get the auditor registry file path to check it was written runPath := agent.config.GetString("logs_config.run_path") - registryPath := fmt.Sprintf("%s/registry.json", runPath) + registryPath := runPath + "/registry.json" // Get file mod time before restart var beforeModTime time.Time diff --git a/comp/logs/agent/agentimpl/agent_test.go b/comp/logs/agent/agentimpl/agent_test.go index 39d984eff37446..8941962b90c6ee 100644 --- a/comp/logs/agent/agentimpl/agent_test.go +++ b/comp/logs/agent/agentimpl/agent_test.go @@ -11,7 +11,6 @@ import ( "bytes" "context" "expvar" - "fmt" "os" "strings" "testing" @@ -89,7 +88,7 @@ func (suite *AgentTestSuite) SetupTest() { 
suite.testDir = suite.T().TempDir() - suite.testLogFile = fmt.Sprintf("%s/test.log", suite.testDir) + suite.testLogFile = suite.testDir + "/test.log" fd, err := os.Create(suite.testLogFile) suite.NoError(err) @@ -200,7 +199,7 @@ func (suite *AgentTestSuite) TestTruncateLogOriginAndService() { suite.configOverrides["logs_config.max_message_size_bytes"] = 10 // Only 1 byte // Create a test file with content that will definitely trigger log-line truncation - truncationLogFile := fmt.Sprintf("%s/truncation.log", suite.testDir) + truncationLogFile := suite.testDir + "/truncation.log" fd, err := os.Create(truncationLogFile) suite.NoError(err) defer fd.Close() diff --git a/comp/logs/agent/config/integration_config.go b/comp/logs/agent/config/integration_config.go index 3594501ef04c88..7b4e99a7f03c1c 100644 --- a/comp/logs/agent/config/integration_config.go +++ b/comp/logs/agent/config/integration_config.go @@ -7,6 +7,7 @@ package config import ( "encoding/json" + "errors" "fmt" "strings" "sync" @@ -176,7 +177,7 @@ func (t *StringSliceField) UnmarshalYAML(unmarshal func(interface{}) error) erro } return nil } - return fmt.Errorf("could not parse YAML config, please double check the yaml files") + return errors.New("could not parse YAML config, please double check the yaml files") } // Dump dumps the contents of this struct to a string, for debugging purposes. @@ -339,19 +340,19 @@ func (c *LogsConfig) Validate() error { // user don't have to specify a logs-config type when defining // an autodiscovery label because so we must override it at some point, // this check is mostly used for sanity purposed to detect an override miss. 
- return fmt.Errorf("a config must have a type") + return errors.New("a config must have a type") case c.Type == FileType: if c.Path == "" { - return fmt.Errorf("file source must have a path") + return errors.New("file source must have a path") } err := c.validateTailingMode() if err != nil { return err } case c.Type == TCPType && c.Port == 0: - return fmt.Errorf("tcp source must have a port") + return errors.New("tcp source must have a port") case c.Type == UDPType && c.Port == 0: - return fmt.Errorf("udp source must have a port") + return errors.New("udp source must have a port") } // Validate fingerprint configuration diff --git a/comp/logs/agent/config/processing_rules.go b/comp/logs/agent/config/processing_rules.go index eef12a170232e8..b33b7be7db3426 100644 --- a/comp/logs/agent/config/processing_rules.go +++ b/comp/logs/agent/config/processing_rules.go @@ -6,6 +6,7 @@ package config import ( + "errors" "fmt" "regexp" ) @@ -39,7 +40,7 @@ type ProcessingRule struct { func ValidateProcessingRules(rules []*ProcessingRule) error { for _, rule := range rules { if rule.Name == "" { - return fmt.Errorf("all processing rules must have a name") + return errors.New("all processing rules must have a name") } switch rule.Type { diff --git a/comp/logs/agent/flare/flare_controller.go b/comp/logs/agent/flare/flare_controller.go index 35999335aaa58f..dd5b8fc15a3423 100644 --- a/comp/logs/agent/flare/flare_controller.go +++ b/comp/logs/agent/flare/flare_controller.go @@ -57,7 +57,7 @@ func (fc *FlareController) FillFlare(fb flaretypes.FlareBuilder) error { default: fi, err := os.Stat(file) if err != nil { - fileInfo = fmt.Sprintf("%s\n", err.Error()) + fileInfo = err.Error() + "\n" } else { fileInfo = fmt.Sprintf("%s %s\n", file, fi.Mode().String()) } diff --git a/comp/logs/auditor/impl/api_v0.go b/comp/logs/auditor/impl/api_v0.go index 5efb3e1c80f24e..e255e8ba276e89 100644 --- a/comp/logs/auditor/impl/api_v0.go +++ b/comp/logs/auditor/impl/api_v0.go @@ -7,7 +7,6 @@ package 
auditorimpl import ( "encoding/json" - "fmt" "strconv" "time" ) @@ -36,7 +35,7 @@ func unmarshalRegistryV0(b []byte) (map[string]*RegistryEntry, error) { switch { case entry.Offset > 0: // from v0 to v1 and further, we also prefixed path with file: - newIdentifier := fmt.Sprintf("file:%s", identifier) + newIdentifier := "file:" + identifier registry[newIdentifier] = &RegistryEntry{LastUpdated: entry.Timestamp, Offset: strconv.FormatInt(entry.Offset, 10)} default: // no valid offset for this entry diff --git a/comp/logs/auditor/impl/auditor.go b/comp/logs/auditor/impl/auditor.go index c5f6c61ad8aa08..a3b551504c94c0 100644 --- a/comp/logs/auditor/impl/auditor.go +++ b/comp/logs/auditor/impl/auditor.go @@ -8,7 +8,7 @@ package auditorimpl import ( "encoding/json" - "fmt" + "errors" "os" "path/filepath" "sync" @@ -410,7 +410,7 @@ func (a *registryAuditor) unmarshalRegistry(b []byte) (map[string]*RegistryEntry } version, exists := r["Version"].(float64) if !exists { - return nil, fmt.Errorf("registry retrieved from disk must have a version number") + return nil, errors.New("registry retrieved from disk must have a version number") } // ensure backward compatibility switch int(version) { @@ -421,6 +421,6 @@ func (a *registryAuditor) unmarshalRegistry(b []byte) (map[string]*RegistryEntry case 0: return unmarshalRegistryV0(b) default: - return nil, fmt.Errorf("invalid registry version number") + return nil, errors.New("invalid registry version number") } } diff --git a/comp/logs/streamlogs/impl/streamlogs.go b/comp/logs/streamlogs/impl/streamlogs.go index 3cdf816bd0363a..035d854dde8f4c 100644 --- a/comp/logs/streamlogs/impl/streamlogs.go +++ b/comp/logs/streamlogs/impl/streamlogs.go @@ -135,7 +135,7 @@ func (sl *streamlogsimpl) exportStreamLogsIfEnabled(logsAgent logsAgent.Componen slDuration := fb.GetFlareArgs().StreamLogsDuration if slDuration <= 0 { - return fmt.Errorf("remote streamlogs has been disabled via an unset duration, exiting streamlogs flare filler") + return 
errors.New("remote streamlogs has been disabled via an unset duration, exiting streamlogs flare filler") } streamLogParams := LogParams{ FilePath: streamlogsLogFilePath, diff --git a/comp/metadata/host/hostimpl/utils/host_test.go b/comp/metadata/host/hostimpl/utils/host_test.go index 14012bad2d26e9..85e8852c03dad7 100644 --- a/comp/metadata/host/hostimpl/utils/host_test.go +++ b/comp/metadata/host/hostimpl/utils/host_test.go @@ -7,7 +7,7 @@ package utils import ( "context" - "fmt" + "errors" "runtime" "testing" @@ -74,7 +74,7 @@ func TestGetInstallMethod(t *testing.T) { installinfoGet = orig }(installinfoGet) - installinfoGet = func(model.Reader) (*installinfo.InstallInfo, error) { return nil, fmt.Errorf("an error") } + installinfoGet = func(model.Reader) (*installinfo.InstallInfo, error) { return nil, errors.New("an error") } installMethod := getInstallMethod(conf) assert.Equal(t, "undefined", installMethod.ToolVersion) diff --git a/comp/metadata/internal/util/inventory_payload.go b/comp/metadata/internal/util/inventory_payload.go index 6296c59d1e7b24..96b7c37226509c 100644 --- a/comp/metadata/internal/util/inventory_payload.go +++ b/comp/metadata/internal/util/inventory_payload.go @@ -54,7 +54,7 @@ package util import ( "context" "encoding/json" - "fmt" + "errors" "path/filepath" "sync" "time" @@ -227,7 +227,7 @@ func (i *InventoryPayload) RefreshTriggered() bool { // GetAsJSON returns the payload as a JSON string. Useful to be displayed in the CLI or added to a flare. 
func (i *InventoryPayload) GetAsJSON() ([]byte, error) { if !i.Enabled { - return nil, fmt.Errorf("inventory metadata is disabled") + return nil, errors.New("inventory metadata is disabled") } i.m.Lock() diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go index 776e426bfc7f8f..1ea4acf7ab0393 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go @@ -7,7 +7,7 @@ package inventoryagentimpl import ( "bytes" - "fmt" + "errors" "runtime" "sort" "testing" @@ -88,7 +88,7 @@ func TestGetPayload(t *testing.T) { func TestInitDataErrorInstallInfo(t *testing.T) { defer func() { installinfoGet = installinfo.Get }() installinfoGet = func(config.Reader) (*installinfo.InstallInfo, error) { - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") } ia := getTestInventoryPayload(t, nil, nil) @@ -361,7 +361,7 @@ func TestFetchSecurityAgent(t *testing.T) { "wrong configuration received for security-agent fetcher", ) - return "", fmt.Errorf("some error") + return "", errors.New("some error") } ia := getTestInventoryPayload(t, nil, nil) @@ -402,7 +402,7 @@ func TestFetchProcessAgent(t *testing.T) { "wrong configuration received for security-agent fetcher", ) - return "", fmt.Errorf("some error") + return "", errors.New("some error") } ia := getTestInventoryPayload(t, nil, nil) @@ -449,7 +449,7 @@ func TestFetchTraceAgent(t *testing.T) { "wrong configuration received for security-agent fetcher", ) - return "", fmt.Errorf("some error") + return "", errors.New("some error") } ia := getTestInventoryPayload(t, nil, nil) @@ -496,7 +496,7 @@ func TestFetchSystemProbeAgent(t *testing.T) { "wrong configuration received for security-agent fetcher", ) - return "", fmt.Errorf("some error") + return "", errors.New("some error") } isPrebuiltDeprecated := 
prebuilt.IsDeprecated() diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go index 54a39dc7142e5d..672676e29b0397 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go @@ -6,6 +6,7 @@ package inventorychecksimpl import ( + "errors" "expvar" "fmt" "testing" @@ -152,7 +153,7 @@ func TestGetPayload(t *testing.T) { Tags: []string{"env:prod"}, }) // Register an error - src.Status.Error(fmt.Errorf("No such file or directory")) + src.Status.Error(errors.New("No such file or directory")) logSources.AddSource(src) fakeTagger := taggerfxmock.SetupFakeTagger(t) diff --git a/comp/metadata/inventoryhost/inventoryhostimpl/inventoryhost_test.go b/comp/metadata/inventoryhost/inventoryhostimpl/inventoryhost_test.go index fade5ec8c208ef..eb41a40d6fd29d 100644 --- a/comp/metadata/inventoryhost/inventoryhostimpl/inventoryhost_test.go +++ b/comp/metadata/inventoryhost/inventoryhostimpl/inventoryhost_test.go @@ -6,7 +6,7 @@ package inventoryhostimpl import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -99,7 +99,7 @@ func pkgSigningMock(_ log.Component) (bool, bool) { return true, false } func cpuErrorMock() *cpu.Info { return &cpu.Info{} } func memoryErrorMock() *memory.Info { return &memory.Info{} } -func networkErrorMock() (*network.Info, error) { return nil, fmt.Errorf("err") } +func networkErrorMock() (*network.Info, error) { return nil, errors.New("err") } func platformErrorMock() *platform.Info { return &platform.Info{} } func setupHostMetadataMock(t *testing.T) { diff --git a/comp/metadata/resources/resourcesimpl/resources_test.go b/comp/metadata/resources/resourcesimpl/resources_test.go index 09febdf77e3a31..9e8618496c9e49 100644 --- a/comp/metadata/resources/resourcesimpl/resources_test.go +++ 
b/comp/metadata/resources/resourcesimpl/resources_test.go @@ -11,7 +11,7 @@ import ( "bytes" "context" "encoding/json" - "fmt" + "errors" "testing" "time" @@ -118,7 +118,7 @@ func TestCollect(t *testing.T) { func TestCollectError(t *testing.T) { defer func(f func() (interface{}, error)) { collectResources = f }(collectResources) collectResources = func() (interface{}, error) { - return nil, fmt.Errorf("some error from gohai") + return nil, errors.New("some error from gohai") } s := serializermock.NewMetricSerializer(t) diff --git a/comp/netflow/flowaggregator/aggregator_test.go b/comp/netflow/flowaggregator/aggregator_test.go index e990f67214d27f..db54f42408d50f 100644 --- a/comp/netflow/flowaggregator/aggregator_test.go +++ b/comp/netflow/flowaggregator/aggregator_test.go @@ -11,6 +11,7 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -389,7 +390,7 @@ func TestFlowAggregator_flush_submitCollectorMetrics_error(t *testing.T) { aggregator := NewFlowAggregator(sender, epForwarder, &conf, "my-hostname", logger, rdnsQuerier) aggregator.goflowPrometheusGatherer = prometheus.GathererFunc(func() ([]*promClient.MetricFamily, error) { - return nil, fmt.Errorf("some prometheus gatherer error") + return nil, errors.New("some prometheus gatherer error") }) // 2/ Act @@ -507,7 +508,7 @@ func TestFlowAggregator_submitCollectorMetrics_error(t *testing.T) { aggregator := NewFlowAggregator(sender, epForwarder, &conf, "my-hostname", logger, rdnsQuerier) aggregator.goflowPrometheusGatherer = prometheus.GathererFunc(func() ([]*promClient.MetricFamily, error) { - return nil, fmt.Errorf("some prometheus gatherer error") + return nil, errors.New("some prometheus gatherer error") }) // 2/ Act diff --git a/comp/netflow/flowaggregator/testutil.go b/comp/netflow/flowaggregator/testutil.go index abb6b366231f66..21eeb8bab42ade 100644 --- a/comp/netflow/flowaggregator/testutil.go +++ b/comp/netflow/flowaggregator/testutil.go @@ -6,7 +6,7 @@ package 
flowaggregator import ( - "fmt" + "errors" "time" ) @@ -21,7 +21,7 @@ func WaitForFlowsToBeFlushed(aggregator *FlowAggregator, timeoutDuration time.Du select { // Got a timeout! fail with a timeout error case <-timeout: - return 0, fmt.Errorf("timeout error waiting for events") + return 0, errors.New("timeout error waiting for events") // Got a tick, we should check on doSomething() case <-ticker.C: events := aggregator.flushedFlowCount.Load() @@ -43,7 +43,7 @@ func WaitForFlowsToAccumulate(aggregator *FlowAggregator, timeoutDuration time.D select { // Got a timeout! fail with a timeout error case <-timeout: - return fmt.Errorf("timeout error waiting for events") + return errors.New("timeout error waiting for events") // Got a tick, we should check on doSomething() case <-ticker.C: // more hacky mutex locking, need to verify that flows accumulated by reading shared memory diff --git a/comp/netflow/goflowlib/formatdriver.go b/comp/netflow/goflowlib/formatdriver.go index f4e64004faeec4..a32beac60c1563 100644 --- a/comp/netflow/goflowlib/formatdriver.go +++ b/comp/netflow/goflowlib/formatdriver.go @@ -7,7 +7,8 @@ package goflowlib import ( "context" - "fmt" + "errors" + "go.uber.org/atomic" "github.com/DataDog/datadog-agent/comp/netflow/common" @@ -50,7 +51,7 @@ func (d *AggregatorFormatDriver) Format(data interface{}) ([]byte, []byte, error d.listenerFlowCount.Add(1) d.flowAggIn <- ConvertFlowWithAdditionalFields(flow, d.namespace) default: - return nil, nil, fmt.Errorf("message is not flowpb.FlowMessage or common.FlowMessageWithAdditionalFields") + return nil, nil, errors.New("message is not flowpb.FlowMessage or common.FlowMessageWithAdditionalFields") } return nil, nil, nil diff --git a/comp/notableevents/impl/submitter.go b/comp/notableevents/impl/submitter.go index 4a192d07706313..48e3d74e50152f 100644 --- a/comp/notableevents/impl/submitter.go +++ b/comp/notableevents/impl/submitter.go @@ -79,8 +79,8 @@ func (s *submitter) submitEvent(payload eventPayload) 
error { // Create base tags for the event tags := []string{ - fmt.Sprintf("channel:%s", payload.Channel), - fmt.Sprintf("provider:%s", payload.Provider), + "channel:" + payload.Channel, + "provider:" + payload.Provider, fmt.Sprintf("event_id:%d", payload.EventID), "source:windows_event_log", } diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler.go b/comp/otelcol/collector/impl-pipeline/flare_filler.go index 598c8b7e7886c5..2801c0b83e0bfa 100644 --- a/comp/otelcol/collector/impl-pipeline/flare_filler.go +++ b/comp/otelcol/collector/impl-pipeline/flare_filler.go @@ -61,7 +61,7 @@ func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error { sourceURLs := src.URLs for _, sourceURL := range sourceURLs { if !strings.HasPrefix(sourceURL, "http://") && !strings.HasPrefix(sourceURL, "https://") { - sourceURL = fmt.Sprintf("http://%s", sourceURL) + sourceURL = "http://" + sourceURL } urll, err := url.Parse(sourceURL) diff --git a/comp/otelcol/ddflareextension/impl/config.go b/comp/otelcol/ddflareextension/impl/config.go index 640ed5c2030afe..120a7098253f7f 100644 --- a/comp/otelcol/ddflareextension/impl/config.go +++ b/comp/otelcol/ddflareextension/impl/config.go @@ -8,7 +8,6 @@ package ddflareextensionimpl import ( "errors" - "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" @@ -75,17 +74,17 @@ func healthExtractEndpoint(c *confmap.Conf) (string, error) { func regularStringEndpointExtractor(c *confmap.Conf) (string, error) { if c == nil { - return "", fmt.Errorf("nil confmap - skipping") + return "", errors.New("nil confmap - skipping") } element := c.Get("endpoint") if element == nil { - return "", fmt.Errorf("Expected endpoint conf element, but none found") + return "", errors.New("Expected endpoint conf element, but none found") } endpoint, ok := element.(string) if !ok { - return "", fmt.Errorf("endpoint conf element was unexpectedly not a string") + return "", errors.New("endpoint conf element was 
unexpectedly not a string") } return endpoint, nil } diff --git a/comp/otelcol/ddflareextension/impl/envconfmap.go b/comp/otelcol/ddflareextension/impl/envconfmap.go index 98575cae72297e..ce949891e386c1 100644 --- a/comp/otelcol/ddflareextension/impl/envconfmap.go +++ b/comp/otelcol/ddflareextension/impl/envconfmap.go @@ -7,6 +7,7 @@ package ddflareextensionimpl import ( "context" + "errors" "fmt" "slices" @@ -34,7 +35,7 @@ func newEnvConfMap(ctx context.Context, configProviderSettings otelcol.ConfigPro return f.Create(providersSettings).Scheme() == schemeName }) if envProviderIndex == -1 { - return nil, fmt.Errorf("env provider not found") + return nil, errors.New("env provider not found") } envProvider := providerFactories[envProviderIndex] uuids := make(map[string]string) diff --git a/comp/otelcol/ddprofilingextension/impl/extension.go b/comp/otelcol/ddprofilingextension/impl/extension.go index 2ccb1b7220f35a..3d4e11bd1c34cd 100644 --- a/comp/otelcol/ddprofilingextension/impl/extension.go +++ b/comp/otelcol/ddprofilingextension/impl/extension.go @@ -9,7 +9,6 @@ package ddprofilingextensionimpl import ( "context" "errors" - "fmt" "net/http" "runtime/debug" "strings" @@ -128,19 +127,19 @@ func (e *ddExtension) startForOCB() error { } } } - tags.WriteString(fmt.Sprintf("agent_version:%s", agentVersion)) + tags.WriteString("agent_version:" + agentVersion) tags.WriteString(",source:oss-ddprofilingextension") if e.cfg.ProfilerOptions.Env != "" { - tags.WriteString(fmt.Sprintf(",default_env:%s", e.cfg.ProfilerOptions.Env)) + tags.WriteString(",default_env:" + e.cfg.ProfilerOptions.Env) } if source.Kind == "host" { profilerOptions = append(profilerOptions, profiler.WithHostname(source.Identifier)) - tags.WriteString(fmt.Sprintf(",host:%s", source.Identifier)) + tags.WriteString(",host:" + source.Identifier) } if source.Kind == "task_arn" { - tags.WriteString(fmt.Sprintf(",orchestrator:fargate_ecs,task_arn:%s", source.Identifier)) + 
tags.WriteString(",orchestrator:fargate_ecs,task_arn:" + source.Identifier) } cl := new(http.Client) diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go b/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go index 1c518caa434e0b..069eb3e3225b78 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/factory.go @@ -8,6 +8,7 @@ package datadogexporter import ( "context" + "errors" "fmt" "runtime" "sync" @@ -161,7 +162,7 @@ func (f *factory) createTracesExporter( } if cfg.OnlyMetadata { - return nil, fmt.Errorf("datadog::only_metadata should not be set in OTel Agent") + return nil, errors.New("datadog::only_metadata should not be set in OTel Agent") } tracex := newTracesExporter(ctx, set, cfg, f.traceagentcmp, f.gatewayUsage, f.store.DDOTTraces, f.reporter) diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go index b7ec4630d92aaf..41e674af64463a 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/logs_exporter_test.go @@ -10,7 +10,7 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" - "fmt" + "strconv" "testing" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil" @@ -54,13 +54,13 @@ func TestLogsExporter(t *testing.T) { "instance_num": float64(1), "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Info", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Info", "otel.severity_number": "9", "otel.span_id": 
spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, }, @@ -89,13 +89,13 @@ func TestLogsExporter(t *testing.T) { "datadog.log.source": "custom_source", "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Info", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Info", "otel.severity_number": "9", "otel.span_id": spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", "host.name": "test-host", "hostname": "test-host", @@ -125,13 +125,13 @@ func TestLogsExporter(t *testing.T) { "datadog.log.source": "custom_source_rattr", "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Info", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Info", "otel.severity_number": "9", "otel.span_id": spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, 
}, @@ -157,13 +157,13 @@ func TestLogsExporter(t *testing.T) { "instance_num": float64(1), "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Fatal", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Fatal", "otel.severity_number": "9", "otel.span_id": spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, }, @@ -189,13 +189,13 @@ func TestLogsExporter(t *testing.T) { "instance_num": float64(1), "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Info", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Info", "otel.severity_number": "9", "otel.span_id": spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, }, @@ -223,13 +223,13 @@ func TestLogsExporter(t *testing.T) { "instance_num": float64(1), "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Info", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": 
strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Info", "otel.severity_number": "9", "otel.span_id": spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, { @@ -240,7 +240,7 @@ func TestLogsExporter(t *testing.T) { "status": "Info", "otel.severity_text": "Info", "otel.severity_number": "9", - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, }, @@ -268,13 +268,13 @@ func TestLogsExporter(t *testing.T) { "instance_num": float64(1), "@timestamp": testutil.TestLogTime.Format("2006-01-02T15:04:05.000Z07:00"), "status": "Info", - "dd.span_id": fmt.Sprintf("%d", spanIDToUint64(ld.SpanID())), - "dd.trace_id": fmt.Sprintf("%d", traceIDToUint64(ld.TraceID())), + "dd.span_id": strconv.FormatUint(spanIDToUint64(ld.SpanID()), 10), + "dd.trace_id": strconv.FormatUint(traceIDToUint64(ld.TraceID()), 10), "otel.severity_text": "Info", "otel.severity_number": "9", "otel.span_id": spanIDToHexOrEmptyString(ld.SpanID()), "otel.trace_id": traceIDToHexOrEmptyString(ld.TraceID()), - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, { @@ -285,7 +285,7 @@ func TestLogsExporter(t *testing.T) { "status": "Info", "otel.severity_text": "Info", "otel.severity_number": "9", - "otel.timestamp": fmt.Sprintf("%d", testutil.TestLogTime.UnixNano()), + "otel.timestamp": strconv.FormatInt(testutil.TestLogTime.UnixNano(), 10), "resource-attr": "resource-attr-val-1", }, }, diff --git 
a/comp/otelcol/otlp/components/exporter/serializerexporter/config.go b/comp/otelcol/otlp/components/exporter/serializerexporter/config.go index d7e24186f488e0..ef2848c3678d8f 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/config.go +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/config.go @@ -57,7 +57,7 @@ type ExporterConfig struct { func (c *ExporterConfig) Validate() error { histCfg := c.Metrics.Metrics.HistConfig if histCfg.Mode == datadogconfig.HistogramModeNoBuckets && !histCfg.SendAggregations { - return fmt.Errorf("'nobuckets' mode and `send_aggregation_metrics` set to false will send no histogram metrics") + return errors.New("'nobuckets' mode and `send_aggregation_metrics` set to false will send no histogram metrics") } if c.HostMetadata.Enabled && c.HostMetadata.ReporterPeriod < 5*time.Minute { @@ -89,7 +89,7 @@ func (c *ExporterConfig) Unmarshal(configMap *confmap.Conf) error { // If an endpoint is not explicitly set, override it based on the site. if !configMap.IsSet("metrics::endpoint") { - c.Metrics.Metrics.Endpoint = fmt.Sprintf("https://api.%s", c.API.Site) + c.Metrics.Metrics.Endpoint = "https://api." 
+ c.API.Site } // Return an error if an endpoint is explicitly set to "" diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/serializer.go b/comp/otelcol/otlp/components/exporter/serializerexporter/serializer.go index 7eca2630b68dce..d3821ccef0467c 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/serializer.go +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/serializer.go @@ -7,7 +7,7 @@ package serializerexporter import ( "context" - "fmt" + "errors" "strings" "github.com/DataDog/datadog-agent/comp/core/config" @@ -176,7 +176,7 @@ func InitSerializer(logger *zap.Logger, cfg *ExporterConfig, sourceProvider sour } fw, ok := f.(*defaultforwarder.DefaultForwarder) if !ok { - return nil, nil, fmt.Errorf("failed to cast forwarder to defaultforwarder.DefaultForwarder") + return nil, nil, errors.New("failed to cast forwarder to defaultforwarder.DefaultForwarder") } return s, fw, nil } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go index 980eba3b82f0c3..45359b87da7921 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/common.go @@ -177,11 +177,11 @@ func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { } } if namespace, ok := attrs.Get(string(conventions.K8SNamespaceNameKey)); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, fmt.Sprintf("/namespaces//%s", namespace.AsString()))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, "/namespaces//"+namespace.AsString())) } if nodeName, ok := attrs.Get(string(conventions.K8SNodeNameKey)); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, fmt.Sprintf("/nodes//%s", nodeName.AsString()))) + entityIDs = append(entityIDs, 
types.NewEntityID(types.KubernetesMetadata, "/nodes//"+nodeName.AsString())) } if podUID, ok := attrs.Get(string(conventions.K8SPodUIDKey)); ok { entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString())) diff --git a/comp/otelcol/otlp/config.go b/comp/otelcol/otlp/config.go index 565e78d7ff4646..9d7ba667d86541 100644 --- a/comp/otelcol/otlp/config.go +++ b/comp/otelcol/otlp/config.go @@ -9,6 +9,7 @@ package otlp import ( "context" + "errors" "fmt" "strings" @@ -43,7 +44,7 @@ func FromAgentConfig(cfg config.Reader) (PipelineConfig, error) { tracesEnabled := cfg.GetBool(coreconfig.OTLPTracesEnabled) logsEnabled := cfg.GetBool(coreconfig.OTLPLogsEnabled) if !metricsEnabled && !tracesEnabled && !logsEnabled { - errs = append(errs, fmt.Errorf("at least one OTLP signal needs to be enabled")) + errs = append(errs, errors.New("at least one OTLP signal needs to be enabled")) } logsConfig := configcheck.ReadConfigSection(cfg, coreconfig.OTLPLogs) diff --git a/comp/otelcol/otlp/internal/configutils/utils_test.go b/comp/otelcol/otlp/internal/configutils/utils_test.go index b964552a314b10..c0f099b564f53a 100644 --- a/comp/otelcol/otlp/internal/configutils/utils_test.go +++ b/comp/otelcol/otlp/internal/configutils/utils_test.go @@ -7,7 +7,6 @@ package configutils import ( "context" - "fmt" "os" "testing" @@ -67,7 +66,7 @@ func TestNewConfigProviderFromMap(t *testing.T) { // build default provider from same data settings := otelcol.ConfigProviderSettings{ ResolverSettings: confmap.ResolverSettings{ - URIs: []string{fmt.Sprintf("file:%s", testPath)}, + URIs: []string{"file:" + testPath}, ProviderFactories: []confmap.ProviderFactory{ fileprovider.NewFactory(), envprovider.NewFactory(), diff --git a/comp/otelcol/otlp/pipeline_validator.go b/comp/otelcol/otlp/pipeline_validator.go index 956804de9e3637..9459ea33ab4198 100644 --- a/comp/otelcol/otlp/pipeline_validator.go +++ b/comp/otelcol/otlp/pipeline_validator.go @@ -8,7 +8,7 @@ package otlp import 
( - "fmt" + "errors" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/logs/message" @@ -16,7 +16,7 @@ import ( func checkAndUpdateCfg(_ config.Component, pcfg PipelineConfig, logsAgentChannel chan *message.Message) error { if pcfg.LogsEnabled && logsAgentChannel == nil { - pipelineError.Store(fmt.Errorf("OTLP logs is enabled but logs agent is not enabled")) + pipelineError.Store(errors.New("OTLP logs is enabled but logs agent is not enabled")) return pipelineError.Load() } return nil diff --git a/comp/process/agent/flare.go b/comp/process/agent/flare.go index 23f71fbd675f9e..840d6f465ee0c7 100644 --- a/comp/process/agent/flare.go +++ b/comp/process/agent/flare.go @@ -8,7 +8,6 @@ package agent import ( "encoding/json" - "fmt" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/pkg/process/checks" @@ -32,7 +31,7 @@ func (fh *FlareHelper) FillFlare(fb flaretypes.FlareBuilder) error { } checkName := check.Name() - filename := fmt.Sprintf("%s_check_output.json", checkName) + filename := checkName + "_check_output.json" fb.AddFileFromFunc(filename, func() ([]byte, error) { //nolint:errcheck checkOutput, ok := checks.GetCheckOutput(checkName) if !ok { @@ -40,7 +39,7 @@ func (fh *FlareHelper) FillFlare(fb flaretypes.FlareBuilder) error { } checkJSON, err := json.MarshalIndent(checkOutput, "", " ") if err != nil { - return []byte(fmt.Sprintf("error: %s", err.Error())), err + return []byte("error: " + err.Error()), err } return checkJSON, nil }) diff --git a/comp/process/agent/status.go b/comp/process/agent/status.go index 06c4d1c259de5f..dd9d03fef63d03 100644 --- a/comp/process/agent/status.go +++ b/comp/process/agent/status.go @@ -68,7 +68,7 @@ func (s StatusProvider) populateStatus() map[string]interface{} { // Get expVar server address ipcAddr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { - status["error"] = fmt.Sprintf("%v", err.Error()) + 
status["error"] = err.Error() return status } @@ -79,21 +79,21 @@ func (s StatusProvider) populateStatus() map[string]interface{} { agentStatus, err := processStatus.GetStatus(s.config, url, s.hostname) if err != nil { - status["error"] = fmt.Sprintf("%v", err.Error()) + status["error"] = err.Error() return status } bytes, err := json.Marshal(agentStatus) if err != nil { return map[string]interface{}{ - "error": fmt.Sprintf("%v", err.Error()), + "error": err.Error(), } } err = json.Unmarshal(bytes, &status) if err != nil { return map[string]interface{}{ - "error": fmt.Sprintf("%v", err.Error()), + "error": err.Error(), } } diff --git a/comp/process/status/statusimpl/status.go b/comp/process/status/statusimpl/status.go index 3cdb7c379ef9bf..3449756de91204 100644 --- a/comp/process/status/statusimpl/status.go +++ b/comp/process/status/statusimpl/status.go @@ -90,7 +90,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { // Get expVar server address ipcAddr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { - status["error"] = fmt.Sprintf("%v", err.Error()) + status["error"] = err.Error() return status } @@ -103,21 +103,21 @@ func (s statusProvider) populateStatus() map[string]interface{} { agentStatus, err := processStatus.GetStatus(s.config, url, s.hostname) if err != nil { - status["error"] = fmt.Sprintf("%v", err.Error()) + status["error"] = err.Error() return status } bytes, err := json.Marshal(agentStatus) if err != nil { return map[string]interface{}{ - "error": fmt.Sprintf("%v", err.Error()), + "error": err.Error(), } } err = json.Unmarshal(bytes, &status) if err != nil { return map[string]interface{}{ - "error": fmt.Sprintf("%v", err.Error()), + "error": err.Error(), } } diff --git a/comp/rdnsquerier/impl/rdnsquerier_test.go b/comp/rdnsquerier/impl/rdnsquerier_test.go index 81d80b8ea2f7c8..3dc9a031e9ed9e 100644 --- a/comp/rdnsquerier/impl/rdnsquerier_test.go +++ b/comp/rdnsquerier/impl/rdnsquerier_test.go @@ -9,6 
+9,7 @@ package rdnsquerierimpl import ( "context" + "errors" "fmt" "net" "sync" @@ -580,13 +581,13 @@ func TestRetries(t *testing.T) { ts := testSetup(t, overrides, true, map[string]*fakeResults{ "192.168.1.100": {errors: []error{ - fmt.Errorf("test error1"), - fmt.Errorf("test error2")}, + errors.New("test error1"), + errors.New("test error2")}, }, "192.168.1.101": {errors: []error{ &net.DNSError{Err: "test timeout error", IsTimeout: true}, &net.DNSError{Err: "test temporary error", IsTemporary: true}, - fmt.Errorf("test error")}, + errors.New("test error")}, }, }, 0, @@ -672,9 +673,9 @@ func TestRetriesExceeded(t *testing.T) { ts := testSetup(t, overrides, true, map[string]*fakeResults{ "192.168.1.100": {errors: []error{ - fmt.Errorf("test error1"), - fmt.Errorf("test error2"), - fmt.Errorf("test error3")}, + errors.New("test error1"), + errors.New("test error2"), + errors.New("test error3")}, }, }, 0, @@ -1166,7 +1167,7 @@ func TestGetHostnames(t *testing.T) { ipAddrs: []string{"invalid_ip", "192.168.1.102", "8.8.8.8", "192.168.1.100"}, timeout: 1 * time.Second, expected: map[string]rdnsquerierdef.ReverseDNSResult{ - "invalid_ip": {IP: "invalid_ip", Err: fmt.Errorf("invalid IP address invalid_ip")}, + "invalid_ip": {IP: "invalid_ip", Err: errors.New("invalid IP address invalid_ip")}, "192.168.1.102": {IP: "192.168.1.102", Hostname: "fakehostname-192.168.1.102"}, "8.8.8.8": {IP: "8.8.8.8"}, "192.168.1.100": {IP: "192.168.1.100", Hostname: "fakehostname-192.168.1.100"}, @@ -1178,8 +1179,8 @@ func TestGetHostnames(t *testing.T) { ipAddrs: []string{"192.168.1.105", "invalid", "8.8.8.8"}, timeout: 1 * time.Second, expected: map[string]rdnsquerierdef.ReverseDNSResult{ - "192.168.1.105": {IP: "192.168.1.105", Err: fmt.Errorf("timeout reached while resolving hostname for IP address 192.168.1.105")}, - "invalid": {IP: "invalid", Err: fmt.Errorf("invalid IP address invalid")}, + "192.168.1.105": {IP: "192.168.1.105", Err: errors.New("timeout reached while resolving 
hostname for IP address 192.168.1.105")}, + "invalid": {IP: "invalid", Err: errors.New("invalid IP address invalid")}, "8.8.8.8": {IP: "8.8.8.8"}, }, }, diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient.go b/comp/remote-config/rcclient/rcclientimpl/rcclient.go index 26f8af117f6195..74a170effe9802 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient.go @@ -88,7 +88,7 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { } if deps.Params.AgentName == "" || deps.Params.AgentVersion == "" { - return nil, fmt.Errorf("Remote config client is missing agent name or version parameter") + return nil, errors.New("Remote config client is missing agent name or version parameter") } // Append client options diff --git a/comp/snmptraps/config/config_test.go b/comp/snmptraps/config/config_test.go index 3e5f55389cf4c1..dff290030b3034 100644 --- a/comp/snmptraps/config/config_test.go +++ b/comp/snmptraps/config/config_test.go @@ -7,7 +7,6 @@ package config import ( "context" - "fmt" "strings" "testing" @@ -104,7 +103,7 @@ func buildDDConfig(t testing.TB, trapConfig *TrapsConfig, globalNamespace string err := mapstructure.Decode(trapConfig, &rawTrapConfig) require.NoError(t, err) for k, v := range rawTrapConfig { - k = fmt.Sprintf("network_devices.snmp_traps.%s", k) + k = "network_devices.snmp_traps." 
+ k ddcfg.SetWithoutSource(k, v) } } diff --git a/comp/snmptraps/oidresolver/oidresolverimpl/oid_resolver_test.go b/comp/snmptraps/oidresolver/oidresolverimpl/oid_resolver_test.go index 00a20d8f8b5ef7..c766cc4a213445 100644 --- a/comp/snmptraps/oidresolver/oidresolverimpl/oid_resolver_test.go +++ b/comp/snmptraps/oidresolver/oidresolverimpl/oid_resolver_test.go @@ -8,9 +8,9 @@ package oidresolverimpl import ( "bytes" "encoding/json" - "fmt" "io/fs" "math/rand" + "strconv" "strings" "testing" @@ -299,7 +299,7 @@ func TestIsValidOID_PropertyBasedTesting(t *testing.T) { oidLen := rand.Intn(100) + 2 oidParts := make([]string, oidLen) for j := 0; j < oidLen; j++ { - oidParts[j] = fmt.Sprint(rand.Intn(100000)) + oidParts[j] = strconv.Itoa(rand.Intn(100000)) } recreatedOID := strings.Join(oidParts, ".") if rand.Intn(2) == 0 { diff --git a/comp/softwareinventory/impl/inventorysoftware.go b/comp/softwareinventory/impl/inventorysoftware.go index 3ff8f7f220ace8..c4436a24847f17 100644 --- a/comp/softwareinventory/impl/inventorysoftware.go +++ b/comp/softwareinventory/impl/inventorysoftware.go @@ -10,6 +10,7 @@ package softwareinventoryimpl import ( "context" + "errors" "fmt" "math/rand" "net/http" @@ -231,7 +232,7 @@ func (is *softwareInventory) startSoftwareInventoryCollection(ctx context.Contex func (is *softwareInventory) sendPayload() error { forwarder, ok := is.eventPlatform.Get() if !ok { - return fmt.Errorf("event platform forwarder not available") + return errors.New("event platform forwarder not available") } payload := is.getPayload() diff --git a/comp/softwareinventory/impl/inventorysoftware_test.go b/comp/softwareinventory/impl/inventorysoftware_test.go index db7c9d42683acc..e2b8f8ea7a927a 100644 --- a/comp/softwareinventory/impl/inventorysoftware_test.go +++ b/comp/softwareinventory/impl/inventorysoftware_test.go @@ -7,16 +7,17 @@ package softwareinventoryimpl import ( "encoding/json" - "fmt" + "errors" + "net/http" + "net/http/httptest" + "testing" + compdef 
"github.com/DataDog/datadog-agent/comp/def" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/compression/selector" "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/stretchr/testify/require" - "net/http" - "net/http/httptest" - "testing" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/flare/helpers" @@ -97,7 +98,7 @@ func TestFlareProviderOutputDisabled(t *testing.T) { func TestFlareProviderOutputFailed(t *testing.T) { f := newFixtureWithData(t, true, []software.Entry{{DisplayName: "TestApp"}}) f.sysProbeClient = &mockSysProbeClient{} - f.sysProbeClient.On("GetCheck", sysconfig.SoftwareInventoryModule).Return(nil, fmt.Errorf("error")) + f.sysProbeClient.On("GetCheck", sysconfig.SoftwareInventoryModule).Return(nil, errors.New("error")) is := f.sut() flareProvider := is.FlareProvider() diff --git a/comp/syntheticstestscheduler/impl/worker.go b/comp/syntheticstestscheduler/impl/worker.go index d15ca7c1e94852..e9717acbcd23ea 100644 --- a/comp/syntheticstestscheduler/impl/worker.go +++ b/comp/syntheticstestscheduler/impl/worker.go @@ -9,6 +9,7 @@ import ( "context" "crypto/rand" "encoding/json" + "errors" "fmt" "io" "math/big" @@ -169,7 +170,7 @@ func (s *syntheticsTestScheduler) runWorker(ctx context.Context, workerID int) { s.log.Debugf("[worker%d] error sending result: %s, publicID %s", workerID, err, syntheticsTestCtx.cfg.PublicID) s.statsdClient.Incr(syntheticsMetricPrefix+"evp.send_result_failure", []string{"reason:error_sending_result", fmt.Sprintf("org_id:%d", syntheticsTestCtx.cfg.OrgID), fmt.Sprintf("subtype:%s", syntheticsTestCtx.cfg.Config.Request.GetSubType())}, 1) //nolint:errcheck } - s.statsdClient.Incr(syntheticsMetricPrefix+"checks_processed", []string{fmt.Sprintf("status:%s", status), fmt.Sprintf("org_id:%d", syntheticsTestCtx.cfg.OrgID), fmt.Sprintf("subtype:%s", 
syntheticsTestCtx.cfg.Config.Request.GetSubType())}, 1) //nolint:errcheck + s.statsdClient.Incr(syntheticsMetricPrefix+"checks_processed", []string{"status:" + status, fmt.Sprintf("org_id:%d", syntheticsTestCtx.cfg.OrgID), fmt.Sprintf("subtype:%s", syntheticsTestCtx.cfg.Config.Request.GetSubType())}, 1) //nolint:errcheck } } } @@ -219,7 +220,7 @@ func toNetpathConfig(c common.SyntheticsTestConfig) (config.Config, error) { case common.UDPConfigRequest: req, ok := c.Config.Request.(common.UDPConfigRequest) if !ok { - return config.Config{}, fmt.Errorf("invalid UDP request type") + return config.Config{}, errors.New("invalid UDP request type") } cfg.Protocol = payload.ProtocolUDP cfg.DestHostname = req.Host @@ -231,7 +232,7 @@ func toNetpathConfig(c common.SyntheticsTestConfig) (config.Config, error) { case common.TCPConfigRequest: req, ok := c.Config.Request.(common.TCPConfigRequest) if !ok { - return config.Config{}, fmt.Errorf("invalid TCP request type") + return config.Config{}, errors.New("invalid TCP request type") } cfg.Protocol = payload.ProtocolTCP cfg.DestHostname = req.Host @@ -243,7 +244,7 @@ func toNetpathConfig(c common.SyntheticsTestConfig) (config.Config, error) { case common.ICMPConfigRequest: req, ok := c.Config.Request.(common.ICMPConfigRequest) if !ok { - return config.Config{}, fmt.Errorf("invalid ICMP request type") + return config.Config{}, errors.New("invalid ICMP request type") } cfg.Protocol = payload.ProtocolICMP cfg.DestHostname = req.Host @@ -392,7 +393,7 @@ func (s *syntheticsTestScheduler) networkPathToTestResult(w *workerResult) (*com return &common.TestResult{ Location: struct { ID string `json:"id"` - }{ID: fmt.Sprintf("agent:%s", w.hostname)}, + }{ID: "agent:" + w.hostname}, DD: make(map[string]interface{}), Result: result, Test: t, diff --git a/comp/syntheticstestscheduler/impl/worker_test.go b/comp/syntheticstestscheduler/impl/worker_test.go index 53edfede4a4017..44a6f7fd1c1d66 100644 --- 
a/comp/syntheticstestscheduler/impl/worker_test.go +++ b/comp/syntheticstestscheduler/impl/worker_test.go @@ -7,7 +7,7 @@ package syntheticstestschedulerimpl import ( "crypto/rand" - "fmt" + "errors" "io" "math" "math/big" @@ -350,7 +350,7 @@ func TestNetworkPathToTestResult(t *testing.T) { name: "failure case", worker: workerResult{ tracerouteResult: payload.NetworkPath{}, - tracerouteError: fmt.Errorf("connection timeout"), + tracerouteError: errors.New("connection timeout"), tracerouteCfg: trCfg, testCfg: SyntheticsTestCtx{ cfg: common.SyntheticsTestConfig{ @@ -429,7 +429,7 @@ func TestGenerateRandomStringUInt63(t *testing.T) { t.Run("error path", func(t *testing.T) { randIntFn := func(_ io.Reader, _ *big.Int) (*big.Int, error) { - return nil, fmt.Errorf("some errors") + return nil, errors.New("some errors") } got, err := generateRandomStringUInt63(randIntFn) diff --git a/comp/systray/systray/systrayimpl/doconfigure.go b/comp/systray/systray/systrayimpl/doconfigure.go index 2ce64747cb834c..231cd793e3137b 100644 --- a/comp/systray/systray/systrayimpl/doconfigure.go +++ b/comp/systray/systray/systrayimpl/doconfigure.go @@ -8,6 +8,7 @@ package systrayimpl import ( "context" + "errors" "fmt" "net" "time" @@ -42,7 +43,7 @@ func doConfigure(s *systrayImpl) error { guiPort := s.config.GetString("GUI_port") if guiPort == "-1" { - return fmt.Errorf("GUI not enabled: to enable, please set an appropriate port in your datadog.yaml file") + return errors.New("GUI not enabled: to enable, please set an appropriate port in your datadog.yaml file") } // 'http://localhost' is preferred over 'http://127.0.0.1' due to Internet Explorer behavior. 
diff --git a/comp/systray/systray/systrayimpl/systray.go b/comp/systray/systray/systrayimpl/systray.go index b0cd02e264d7e5..6d90e4bb7648c0 100644 --- a/comp/systray/systray/systrayimpl/systray.go +++ b/comp/systray/systray/systrayimpl/systray.go @@ -12,6 +12,7 @@ import "C" import ( "context" + "errors" "fmt" "os/exec" "runtime" @@ -118,7 +119,7 @@ func newSystray(deps dependencies) (systray.Component, error) { return nil, fmt.Errorf("failed to call IsUserAnAdmin %v", err) } if !isAdmin { - return nil, fmt.Errorf("not running as an admin, systray requires administrative privileges") + return nil, errors.New("not running as an admin, systray requires administrative privileges") } // fx init @@ -246,7 +247,7 @@ func acquireProcessSingleton(eventname string) (windows.Handle, error) { if h != windows.Handle(0) { windows.CloseHandle(h) - return windows.Handle(0), fmt.Errorf("systray is already running") + return windows.Handle(0), errors.New("systray is already running") } } diff --git a/comp/trace/agent/impl/agent.go b/comp/trace/agent/impl/agent.go index 23d0a96e27b23d..36358df95cf98e 100644 --- a/comp/trace/agent/impl/agent.go +++ b/comp/trace/agent/impl/agent.go @@ -113,7 +113,7 @@ func NewAgent(deps dependencies) (traceagent.Component, error) { tracecfg := deps.Config.Object() if !tracecfg.Enabled { log.Info(messageAgentDisabled) - deps.TelemetryCollector.SendStartupError(telemetry.TraceAgentNotEnabled, fmt.Errorf("")) + deps.TelemetryCollector.SendStartupError(telemetry.TraceAgentNotEnabled, errors.New("")) // Required to signal that the whole app must stop. 
_ = deps.Shutdowner.Shutdown() return c, nil diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go index 073841d265984d..82f7cedf755e11 100644 --- a/comp/trace/config/setup.go +++ b/comp/trace/config/setup.go @@ -174,7 +174,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error obsPipelineEnabled, prefix := isObsPipelineEnabled(core) if obsPipelineEnabled { - if host := core.GetString(fmt.Sprintf("%s.traces.url", prefix)); host == "" { + if host := core.GetString(prefix + ".traces.url"); host == "" { log.Errorf("%s.traces.enabled but %s.traces.url is empty.", prefix, prefix) } else { c.Endpoints[0].Host = host diff --git a/comp/trace/etwtracer/etwtracerimpl/etwtracerimpl.go b/comp/trace/etwtracer/etwtracerimpl/etwtracerimpl.go index a0c8a237caeae1..4b442e4a49d051 100644 --- a/comp/trace/etwtracer/etwtracerimpl/etwtracerimpl.go +++ b/comp/trace/etwtracer/etwtracerimpl/etwtracerimpl.go @@ -455,7 +455,7 @@ func (a *etwtracerimpl) stop(_ context.Context) error { func (a *etwtracerimpl) addPID(pid uint32) error { if len(a.pids) >= MAX_EVENT_FILTER_PID_COUNT { - return fmt.Errorf("too many processes registered") + return errors.New("too many processes registered") } c, err := winio.DialPipe(fmt.Sprintf(clientNamedPipePath, pid), nil) if err != nil { diff --git a/pkg/aggregator/demultiplexer_agent.go b/pkg/aggregator/demultiplexer_agent.go index 52dab7dd1facb5..163112e58d4c05 100644 --- a/pkg/aggregator/demultiplexer_agent.go +++ b/pkg/aggregator/demultiplexer_agent.go @@ -254,7 +254,7 @@ func (d *AgentDemultiplexer) AddAgentStartupTelemetry(agentVersion string) { if d.aggregator.hostname != "" { // Send startup event only when we have a valid hostname d.aggregator.eventIn <- event.Event{ - Text: fmt.Sprintf("Version %s", agentVersion), + Text: "Version " + agentVersion, SourceTypeName: "System", Host: d.aggregator.hostname, EventType: "Agent Startup", diff --git a/pkg/aggregator/demultiplexer_agent_test.go 
b/pkg/aggregator/demultiplexer_agent_test.go index 1ad28526f2c74e..1e67481e294db5 100644 --- a/pkg/aggregator/demultiplexer_agent_test.go +++ b/pkg/aggregator/demultiplexer_agent_test.go @@ -158,7 +158,7 @@ func TestMetricSampleTypeConversion(t *testing.T) { } else { require.False(supported, fmt.Sprintf("Metric type %s should be not supported", test.metricType.String())) } - require.Equal(test.apiMetricType, rv, fmt.Sprintf("Wrong conversion for %s", test.metricType.String())) + require.Equal(test.apiMetricType, rv, "Wrong conversion for "+test.metricType.String()) } } diff --git a/pkg/aggregator/internal/tags/store.go b/pkg/aggregator/internal/tags/store.go index 19a844c2fbbe67..3c9654c46e0479 100644 --- a/pkg/aggregator/internal/tags/store.go +++ b/pkg/aggregator/internal/tags/store.go @@ -7,9 +7,9 @@ package tags import ( - "fmt" "maps" "math/bits" + "strconv" "go.uber.org/atomic" @@ -147,10 +147,10 @@ func (tc *Store) updateTelemetry(s *entryStats) { tlmEntries.Set(float64(len(tc.tagsByKey)), t.name) for i := 0; i < 3; i++ { - tlmTagsetRefsCnt.Set(float64(s.refsFreq[i]), t.name, fmt.Sprintf("%d", i+1)) + tlmTagsetRefsCnt.Set(float64(s.refsFreq[i]), t.name, strconv.Itoa(i+1)) } for i := 3; i < 8; i++ { - tlmTagsetRefsCnt.Set(float64(s.refsFreq[i]), t.name, fmt.Sprintf("%d", 1<<(i-1))) + tlmTagsetRefsCnt.Set(float64(s.refsFreq[i]), t.name, strconv.Itoa(1<<(i-1))) } tlmTagsetMinTags.Set(float64(s.minSize), t.name) diff --git a/pkg/aggregator/sender.go b/pkg/aggregator/sender.go index 3f0d60b15fed50..7c3c7e5e01a82a 100644 --- a/pkg/aggregator/sender.go +++ b/pkg/aggregator/sender.go @@ -6,7 +6,7 @@ package aggregator import ( - "fmt" + "errors" "sync" "time" @@ -138,7 +138,7 @@ func (s *checkSender) SetCheckService(service string) { // FinalizeCheckServiceTag appends the service as a tag for metrics, events, and service checks func (s *checkSender) FinalizeCheckServiceTag() { if s.service != "" { - s.checkTags = append(s.checkTags, fmt.Sprintf("service:%s", 
s.service)) + s.checkTags = append(s.checkTags, "service:"+s.service) } } @@ -316,7 +316,7 @@ func (s *checkSender) Distribution(metric string, value float64, hostname string // The timestamp is in seconds since epoch (accepts fractional seconds) func (s *checkSender) GaugeWithTimestamp(metric string, value float64, hostname string, tags []string, timestamp float64) error { if timestamp <= 0 { - return fmt.Errorf("invalid timestamp") + return errors.New("invalid timestamp") } s.sendMetricSample(metric, value, hostname, tags, metrics.GaugeWithTimestampType, false, false, timestamp) return nil @@ -328,7 +328,7 @@ func (s *checkSender) GaugeWithTimestamp(metric string, value float64, hostname // The timestamp is in seconds since epoch (accepts fractional seconds) func (s *checkSender) CountWithTimestamp(metric string, value float64, hostname string, tags []string, timestamp float64) error { if timestamp <= 0 { - return fmt.Errorf("invalid timestamp") + return errors.New("invalid timestamp") } s.sendMetricSample(metric, value, hostname, tags, metrics.CountWithTimestampType, false, false, timestamp) return nil @@ -417,7 +417,7 @@ func (sp *checkSenderPool) getSender(id checkid.ID) (sender.Sender, error) { if sender, ok := sp.senders[id]; ok { return sender, nil } - return nil, fmt.Errorf("Sender not found") + return nil, errors.New("Sender not found") } func (sp *checkSenderPool) mkSender(id checkid.ID) (sender.Sender, error) { diff --git a/pkg/api/security/cert/cert_getter.go b/pkg/api/security/cert/cert_getter.go index 81114d78fbb3cf..e4d5a35288826e 100644 --- a/pkg/api/security/cert/cert_getter.go +++ b/pkg/api/security/cert/cert_getter.go @@ -12,6 +12,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" "net" "path/filepath" @@ -135,11 +136,11 @@ func FetchOrCreateIPCCert(ctx context.Context, config configModel.Reader) (*tls. 
func GetTLSConfigFromCert(ipccert, ipckey []byte) (*tls.Config, *tls.Config, error) { certPool := x509.NewCertPool() if ok := certPool.AppendCertsFromPEM(ipccert); !ok { - return nil, nil, fmt.Errorf("Unable to generate certPool from PEM IPC cert") + return nil, nil, errors.New("Unable to generate certPool from PEM IPC cert") } tlsCert, err := tls.X509KeyPair(ipccert, ipckey) if err != nil { - return nil, nil, fmt.Errorf("Unable to generate x509 cert from PERM IPC cert and key") + return nil, nil, errors.New("Unable to generate x509 cert from PEM IPC cert and key") } clientTLSConfig := &tls.Config{ diff --git a/pkg/api/security/cert/cert_getter_dca.go b/pkg/api/security/cert/cert_getter_dca.go index 9c2148f9066630..cd646b8cd8fc89 100644 --- a/pkg/api/security/cert/cert_getter_dca.go +++ b/pkg/api/security/cert/cert_getter_dca.go @@ -9,6 +9,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" "net" "net/url" @@ -43,7 +44,7 @@ func readClusterCA(caCertPath, caKeyPath string) (*x509.Certificate, any, error) // Parse the cluster CA cert block, _ := pem.Decode(caCertPEM) if block == nil { - return nil, nil, fmt.Errorf("unable to decode cluster CA cert PEM") + return nil, nil, errors.New("unable to decode cluster CA cert PEM") } caCert, err = x509.ParseCertificate(block.Bytes) if err != nil { @@ -53,7 +54,7 @@ func readClusterCA(caCertPath, caKeyPath string) (*x509.Certificate, any, error) // Parse the cluster CA key block, _ = pem.Decode(caKeyPEM) if block == nil { - return nil, nil, fmt.Errorf("unable to decode cluster CA key PEM") + return nil, nil, errors.New("unable to decode cluster CA key PEM") } var caPrivKey any @@ -117,7 +118,7 @@ func (c *clusterCAData) buildClusterClientTLSConfig() (*tls.Config, error) { // If TLS verification is enabled, configure proper certificate validation // It's not possible to have TLS verification enabled without a CA certificate if c.caCert == nil || c.caPrivKey == nil { - return nil,
fmt.Errorf("cluster_trust_chain.enable_tls_verification cannot be true if cluster_trust_chain.ca_cert_file_path or cluster_trust_chain.ca_key_file_path is not set") + return nil, errors.New("cluster_trust_chain.enable_tls_verification cannot be true if cluster_trust_chain.ca_cert_file_path or cluster_trust_chain.ca_key_file_path is not set") } clusterClientCertPool := x509.NewCertPool() @@ -168,7 +169,7 @@ func (c *clusterCAData) setupCertificateFactoryWithClusterCA(config configModel. // If the process is a CLC Runner, add the CLC Runner host to the SANs clcRunnerHost := config.GetString("clc_runner_host") if clcRunnerHost == "" { - return fmt.Errorf("clc_runner_host is not set") + return errors.New("clc_runner_host is not set") } serverHost = clcRunnerHost } diff --git a/pkg/api/security/security_test.go b/pkg/api/security/security_test.go index 008500b664655f..7a487d051a9298 100644 --- a/pkg/api/security/security_test.go +++ b/pkg/api/security/security_test.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -41,7 +42,7 @@ func TestCreateOrFetchAuthTokenValidGen(t *testing.T) { config, expectTokenPath := initMockConf(t) token, err := FetchOrCreateAuthToken(context.Background(), config) require.NoError(t, err, fmt.Sprintf("%v", err)) - assert.True(t, len(token) > authTokenMinimalLen, fmt.Sprintf("%d", len(token))) + assert.True(t, len(token) > authTokenMinimalLen, strconv.Itoa(len(token))) _, err = os.Stat(expectTokenPath) require.NoError(t, err) } @@ -57,7 +58,7 @@ func TestFetchAuthToken(t *testing.T) { newToken, err := FetchOrCreateAuthToken(context.Background(), config) require.NoError(t, err, fmt.Sprintf("%v", err)) - require.True(t, len(newToken) > authTokenMinimalLen, fmt.Sprintf("%d", len(newToken))) + require.True(t, len(newToken) > authTokenMinimalLen, strconv.Itoa(len(newToken))) _, err = os.Stat(expectTokenPath) require.NoError(t, err) diff --git a/pkg/api/util/util_dca.go 
b/pkg/api/util/util_dca.go index a97bcc47c1f9df..7c3c24470be4e0 100644 --- a/pkg/api/util/util_dca.go +++ b/pkg/api/util/util_dca.go @@ -10,6 +10,7 @@ import ( "context" "crypto/subtle" "crypto/tls" + "errors" "fmt" "net" "net/http" @@ -86,7 +87,7 @@ func GetCrossNodeClientTLSConfig() (*tls.Config, error) { defer tokenLock.RUnlock() if crossNodeClientTLSConfig == nil { - return nil, fmt.Errorf("cross-node client TLS configuration is not set") + return nil, errors.New("cross-node client TLS configuration is not set") } if crossNodeClientTLSConfig.InsecureSkipVerify { @@ -104,7 +105,7 @@ func TokenValidator(tokenGetter func() string) func(w http.ResponseWriter, r *ht auth := r.Header.Get("Authorization") if auth == "" { w.Header().Set("WWW-Authenticate", `Bearer realm="Datadog Agent"`) - err = fmt.Errorf("no session token provided") + err = errors.New("no session token provided") http.Error(w, err.Error(), 401) return err } @@ -119,7 +120,7 @@ func TokenValidator(tokenGetter func() string) func(w http.ResponseWriter, r *ht // The following comparison must be evaluated in constant time if len(tok) != 2 || !constantCompareStrings(tok[1], tokenGetter()) { - err = fmt.Errorf("invalid session token") + err = errors.New("invalid session token") http.Error(w, err.Error(), 403) } diff --git a/pkg/cli/standalone/jmx_nojmx.go b/pkg/cli/standalone/jmx_nojmx.go index 053f8e275b826d..ee911ac44f0a44 100644 --- a/pkg/cli/standalone/jmx_nojmx.go +++ b/pkg/cli/standalone/jmx_nojmx.go @@ -8,7 +8,7 @@ package standalone import ( - "fmt" + "errors" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" internalAPI "github.com/DataDog/datadog-agent/comp/api/api/def" @@ -18,15 +18,15 @@ import ( // ExecJMXCommandConsole is not supported when the 'jmx' build tag isn't included func ExecJMXCommandConsole(_ string, _ []string, _ string, _ []integration.Config, _ internalAPI.Component, _ jmxlogger.Component, _ ipc.Component) error { - return fmt.Errorf("not supported: the Agent is compiled 
without the 'jmx' build tag") + return errors.New("not supported: the Agent is compiled without the 'jmx' build tag") } // ExecJmxListWithMetricsJSON is not supported when the 'jmx' build tag isn't included func ExecJmxListWithMetricsJSON(_ []string, _ string, _ []integration.Config, _ internalAPI.Component, _ jmxlogger.Component, _ ipc.Component) error { - return fmt.Errorf("not supported: the Agent is compiled without the 'jmx' build tag") + return errors.New("not supported: the Agent is compiled without the 'jmx' build tag") } // ExecJmxListWithRateMetricsJSON is not supported when the 'jmx' build tag isn't included func ExecJmxListWithRateMetricsJSON(_ []string, _ string, _ []integration.Config, _ internalAPI.Component, _ jmxlogger.Component, _ ipc.Component) error { - return fmt.Errorf("not supported: the Agent is compiled without the 'jmx' build tag") + return errors.New("not supported: the Agent is compiled without the 'jmx' build tag") } diff --git a/pkg/cli/subcommands/autoscalerlist/command.go b/pkg/cli/subcommands/autoscalerlist/command.go index 95884162276c80..3aa797217615e8 100644 --- a/pkg/cli/subcommands/autoscalerlist/command.go +++ b/pkg/cli/subcommands/autoscalerlist/command.go @@ -8,6 +8,7 @@ package autoscalerlist import ( "encoding/json" + "errors" "fmt" "io" @@ -104,7 +105,7 @@ func getAutoscalerURL(config config.Component) (string, error) { if flavor.GetFlavor() == flavor.ClusterAgent { urlstr = fmt.Sprintf("https://%v:%v/autoscaler-list", ipcAddress, config.GetInt("cluster_agent.cmd_port")) } else { - return "", fmt.Errorf("running autoscaler-list is only supported on the cluster agent") + return "", errors.New("running autoscaler-list is only supported on the cluster agent") } return urlstr, nil @@ -121,7 +122,7 @@ func getAutoscalerList(client ipc.HTTPClient, w io.Writer, url string) error { } if len(r) == 0 { - return fmt.Errorf("no autoscalers found") + return errors.New("no autoscalers found") } autoscalerDump := 
autoscalingWorkload.AutoscalersInfo{} diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index 98b4dbc5909ab7..bac61478010165 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -455,7 +456,7 @@ func run( } } } - return fmt.Errorf("no valid check found") + return errors.New("no valid check found") } if len(cs) > 1 { @@ -690,7 +691,7 @@ func populateMemoryProfileConfig(cliParams *cliParams, initConfig map[string]int if cliParams.profileMemoryFrames != "" { profileMemoryFrames, err := strconv.Atoi(cliParams.profileMemoryFrames) if err != nil { - return fmt.Errorf("--m-frames must be an integer") + return errors.New("--m-frames must be an integer") } initConfig["profile_memory_frames"] = profileMemoryFrames } @@ -698,7 +699,7 @@ func populateMemoryProfileConfig(cliParams *cliParams, initConfig map[string]int if cliParams.profileMemoryGC != "" { profileMemoryGC, err := strconv.Atoi(cliParams.profileMemoryGC) if err != nil { - return fmt.Errorf("--m-gc must be an integer") + return errors.New("--m-gc must be an integer") } initConfig["profile_memory_gc"] = profileMemoryGC @@ -707,11 +708,11 @@ func populateMemoryProfileConfig(cliParams *cliParams, initConfig map[string]int if cliParams.profileMemoryCombine != "" { profileMemoryCombine, err := strconv.Atoi(cliParams.profileMemoryCombine) if err != nil { - return fmt.Errorf("--m-combine must be an integer") + return errors.New("--m-combine must be an integer") } if profileMemoryCombine != 0 && cliParams.profileMemorySort == "traceback" { - return fmt.Errorf("--m-combine cannot be sorted (--m-sort) by traceback") + return errors.New("--m-combine cannot be sorted (--m-sort) by traceback") } initConfig["profile_memory_combine"] = profileMemoryCombine @@ -719,7 +720,7 @@ func populateMemoryProfileConfig(cliParams *cliParams, initConfig 
map[string]int if cliParams.profileMemorySort != "" { if cliParams.profileMemorySort != "lineno" && cliParams.profileMemorySort != "filename" && cliParams.profileMemorySort != "traceback" { - return fmt.Errorf("--m-sort must one of: lineno | filename | traceback") + return errors.New("--m-sort must be one of: lineno | filename | traceback") } initConfig["profile_memory_sort"] = cliParams.profileMemorySort } @@ -727,14 +728,14 @@ func populateMemoryProfileConfig(cliParams *cliParams, initConfig map[string]int if cliParams.profileMemoryLimit != "" { profileMemoryLimit, err := strconv.Atoi(cliParams.profileMemoryLimit) if err != nil { - return fmt.Errorf("--m-limit must be an integer") + return errors.New("--m-limit must be an integer") } initConfig["profile_memory_limit"] = profileMemoryLimit } if cliParams.profileMemoryDiff != "" { if cliParams.profileMemoryDiff != "absolute" && cliParams.profileMemoryDiff != "positive" { - return fmt.Errorf("--m-diff must one of: absolute | positive") + return errors.New("--m-diff must be one of: absolute | positive") } initConfig["profile_memory_diff"] = cliParams.profileMemoryDiff } @@ -750,7 +751,7 @@ func populateMemoryProfileConfig(cliParams *cliParams, initConfig map[string]int if cliParams.profileMemoryVerbose != "" { profileMemoryVerbose, err := strconv.Atoi(cliParams.profileMemoryVerbose) if err != nil { - return fmt.Errorf("--m-verbose must be an integer") + return errors.New("--m-verbose must be an integer") } initConfig["profile_memory_verbose"] = profileMemoryVerbose } diff --git a/pkg/cli/subcommands/clusterchecks/command.go b/pkg/cli/subcommands/clusterchecks/command.go index 8e5e83a62c894f..cbe953360ddea4 100644 --- a/pkg/cli/subcommands/clusterchecks/command.go +++ b/pkg/cli/subcommands/clusterchecks/command.go @@ -179,7 +179,7 @@ func rebalance(_ log.Component, client ipc.HTTPClient, cliParams *cliParams) err func isolate(_ log.Component, client ipc.HTTPClient, cliParams *cliParams) error { if cliParams.checkID == "" {
- return fmt.Errorf("checkID must be specified") + return errors.New("checkID must be specified") } urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/isolate/check/%s", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), cliParams.checkID) diff --git a/pkg/cli/subcommands/config/command.go b/pkg/cli/subcommands/config/command.go index 28fa8ec9015b3a..ff3e40710e58af 100644 --- a/pkg/cli/subcommands/config/command.go +++ b/pkg/cli/subcommands/config/command.go @@ -165,7 +165,7 @@ func listRuntimeConfigurableValue(_ log.Component, client ipc.HTTPClient, cliPar func setConfigValue(_ log.Component, client ipc.HTTPClient, cliParams *cliParams) error { if len(cliParams.args) != 2 { - return fmt.Errorf("exactly two parameters are required: the setting name and its value") + return errors.New("exactly two parameters are required: the setting name and its value") } c, err := cliParams.SettingsBuilder(client) @@ -189,7 +189,7 @@ func setConfigValue(_ log.Component, client ipc.HTTPClient, cliParams *cliParams func getConfigValue(_ log.Component, client ipc.HTTPClient, cliParams *cliParams) error { if len(cliParams.args) != 1 { - return fmt.Errorf("a single setting name must be specified") + return errors.New("a single setting name must be specified") } c, err := cliParams.SettingsBuilder(client) @@ -207,14 +207,14 @@ func getConfigValue(_ log.Component, client ipc.HTTPClient, cliParams *cliParams if cliParams.source { sourcesVal, ok := resp["sources_value"].([]interface{}) if !ok { - return fmt.Errorf("failed to cast sources_value to []map[interface{}]interface{}") + return errors.New("failed to cast sources_value to []map[interface{}]interface{}") } fmt.Printf("sources and their value:\n") for _, sourceVal := range sourcesVal { sourceVal, ok := sourceVal.(map[string]interface{}) if !ok { - return fmt.Errorf("failed to cast sourceVal to map[string]interface{}") + return errors.New("failed to cast sourceVal to map[string]interface{}") } fmt.Printf(" %s: 
%v\n", sourceVal["Source"], sourceVal["Value"]) } diff --git a/pkg/cli/subcommands/workloadfilterlist/verify_config_cel.go b/pkg/cli/subcommands/workloadfilterlist/verify_config_cel.go index 624c891f4671ee..ca5e88687beb0b 100644 --- a/pkg/cli/subcommands/workloadfilterlist/verify_config_cel.go +++ b/pkg/cli/subcommands/workloadfilterlist/verify_config_cel.go @@ -8,6 +8,7 @@ package workloadfilterlist import ( + "errors" "fmt" "io" "strings" @@ -40,7 +41,7 @@ func verifyCELConfig(_ io.Writer, reader io.Reader) error { if len(ruleBundles) == 0 { fmt.Fprintf(color.Output, "%s No rules found in the input\n", color.HiRedString("✗")) - return fmt.Errorf("no rules found in the input") + return errors.New("no rules found in the input") } fmt.Fprintf(color.Output, "%s YAML loaded successfully (%d bundle(s))\n", @@ -54,7 +55,7 @@ func verifyCELConfig(_ io.Writer, reader io.Reader) error { for _, err := range parseErrors { fmt.Fprintf(color.Output, " - %s\n", color.RedString(err.Error())) } - return fmt.Errorf("invalid configuration structure") + return errors.New("invalid configuration structure") } fmt.Fprintf(color.Output, "%s Configuration structure is valid\n", color.HiGreenString("✓")) @@ -92,7 +93,7 @@ func verifyCELConfig(_ io.Writer, reader io.Reader) error { if hasErrors { fmt.Fprintf(color.Output, "%s Validation failed - some rules have errors\n", color.HiRedString("✗")) - return fmt.Errorf("CEL compilation failed") + return errors.New("CEL compilation failed") } fmt.Fprintf(color.Output, "%s All rules are valid!\n", color.HiGreenString("✅")) diff --git a/pkg/cloudfoundry/containertagger/container_tagger.go b/pkg/cloudfoundry/containertagger/container_tagger.go index a5f1805e6f86a9..8fcf12f8bf7d1f 100644 --- a/pkg/cloudfoundry/containertagger/container_tagger.go +++ b/pkg/cloudfoundry/containertagger/container_tagger.go @@ -166,7 +166,7 @@ func updateTagsInContainer(container garden.Container, tags []string) (int, erro Path: shell_path, Args: 
[]string{"/home/vcap/app/.datadog/scripts/update_agent_config.sh"}, User: "vcap", - Env: []string{fmt.Sprintf("DD_NODE_AGENT_TAGS=%s", strings.Join(tags, ","))}, + Env: []string{"DD_NODE_AGENT_TAGS=" + strings.Join(tags, ",")}, }, garden.ProcessIO{}) if err != nil { return -1, err diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go index 9b9daa436bb609..f0e5f2a166db40 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar_test.go @@ -604,7 +604,7 @@ func TestDefaultSidecarTemplateAgentImage(t *testing.T) { name: "no configuration set", setConfig: func() model.Config { return configmock.New(t) }, containerRegistry: commonRegistry, - expectedImage: fmt.Sprintf("%s/agent:latest", commonRegistry), + expectedImage: commonRegistry + "/agent:latest", }, { name: "setting custom registry, image and tag", diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/errors.go b/pkg/clusteragent/admission/mutate/agent_sidecar/errors.go index 84260ad8948968..f4ef8fedebb459 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/errors.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/errors.go @@ -7,8 +7,6 @@ package agentsidecar -import "fmt" - // VolumeAlreadyAttached indicates that a give volume has // already been attached to a Pod's spec type VolumeAlreadyAttached struct { @@ -22,9 +20,9 @@ type PathAlreadyMounted struct { } func (e VolumeAlreadyAttached) Error() string { - return fmt.Sprintf("%s is already attached", e.volume) + return e.volume + " is already attached" } func (e PathAlreadyMounted) Error() string { - return fmt.Sprintf("there is already a volume mounted at %s", e.path) + return "there is already a volume mounted at " + e.path } diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/overrides.go 
b/pkg/clusteragent/admission/mutate/agent_sidecar/overrides.go index 9389d381a6c013..efa399abad6814 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/overrides.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/overrides.go @@ -8,7 +8,7 @@ package agentsidecar import ( - "fmt" + "errors" corev1 "k8s.io/api/core/v1" ) @@ -17,7 +17,7 @@ import ( // boolean that indicates if the container was mutated func withEnvOverrides(container *corev1.Container, extraEnv ...corev1.EnvVar) (bool, error) { if container == nil { - return false, fmt.Errorf("can't apply environment overrides to nil container") + return false, errors.New("can't apply environment overrides to nil container") } mutated := false @@ -49,7 +49,7 @@ func withEnvOverrides(container *corev1.Container, extraEnv ...corev1.EnvVar) (b // withResourceLimits applies the resource limits overrides to the container func withResourceLimits(container *corev1.Container, resourceLimits corev1.ResourceRequirements) error { if container == nil { - return fmt.Errorf("can't apply resource requirements overrides to nil container") + return errors.New("can't apply resource requirements overrides to nil container") } container.Resources = resourceLimits return nil @@ -58,7 +58,7 @@ func withResourceLimits(container *corev1.Container, resourceLimits corev1.Resou // withSecurityContextOverrides applies the security context overrides to the container func withSecurityContextOverrides(container *corev1.Container, securityContext *corev1.SecurityContext) (bool, error) { if container == nil { - return false, fmt.Errorf("can't apply security context overrides to nil container") + return false, errors.New("can't apply security context overrides to nil container") } mutated := false diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go b/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go index 79430f8d8cbca6..7db499aa60fd2e 100644 --- 
a/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go @@ -9,6 +9,7 @@ package agentsidecar import ( "encoding/json" + "errors" "fmt" corev1 "k8s.io/api/core/v1" @@ -41,7 +42,7 @@ func loadSidecarProfiles(profilesJSON string) ([]ProfileOverride, error) { } if len(profiles) > 1 { - return nil, fmt.Errorf("only 1 profile is supported") + return nil, errors.New("only 1 profile is supported") } return profiles, nil @@ -51,11 +52,11 @@ func loadSidecarProfiles(profilesJSON string) ([]ProfileOverride, error) { // returns a boolean that indicates if the container was mutated func applyProfileOverrides(container *corev1.Container, profiles []ProfileOverride) (bool, error) { if container == nil { - return false, fmt.Errorf("can't apply profile overrides to nil containers") + return false, errors.New("can't apply profile overrides to nil containers") } if profiles == nil { - return false, fmt.Errorf("can't apply nil profiles") + return false, errors.New("can't apply nil profiles") } if len(profiles) == 0 { diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go index cc0995c3507d34..f69eaa47385280 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go @@ -8,6 +8,7 @@ package agentsidecar import ( + "errors" "fmt" "slices" @@ -87,7 +88,7 @@ func applyProviderOverrides(pod *corev1.Pod, provider string) (bool, error) { // This function returns a boolean that indicates if the pod was mutated. 
func applyFargateOverrides(pod *corev1.Pod) (bool, error) { if pod == nil { - return false, fmt.Errorf("can't apply profile overrides to nil pod") + return false, errors.New("can't apply profile overrides to nil pod") } mutated := deleteConfigWebhookVolumesAndMounts(pod) diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go index 50c400dbcd780a..a9c2d13c46d0c5 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go @@ -2704,7 +2704,7 @@ func newImage(i string) *image { return nil } name := parts[len(parts)-1] - registry := strings.TrimSuffix(fullImage, fmt.Sprintf("/%s", name)) + registry := strings.TrimSuffix(fullImage, "/"+name) return &image{ raw: i, @@ -2817,7 +2817,7 @@ func (v *podValidator) requireInjection(t *testing.T, expectedContainers []strin { Name: "datadog-auto-instrumentation", MountPath: "/datadog-lib", - SubPath: fmt.Sprintf("opt/datadog/apm/library/%s", lang), + SubPath: "opt/datadog/apm/library/" + lang, }, { Name: "datadog-auto-instrumentation", diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go index e65f00d53623f3..a731e0a84f1dd7 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/config.go @@ -9,6 +9,7 @@ package autoinstrumentation import ( "encoding/json" + "errors" "fmt" corev1 "k8s.io/api/core/v1" @@ -189,23 +190,23 @@ func NewInstrumentationConfig(datadogConfig config.Component) (*InstrumentationC // Ensure both enabled and disabled namespaces are not set together. 
if len(cfg.EnabledNamespaces) > 0 && len(cfg.DisabledNamespaces) > 0 { - return nil, fmt.Errorf("apm_config.instrumentation.enabled_namespaces and apm_config.instrumentation.disabled_namespaces are mutually exclusive and cannot be set together") + return nil, errors.New("apm_config.instrumentation.enabled_namespaces and apm_config.instrumentation.disabled_namespaces are mutually exclusive and cannot be set together") } // Ensure both enabled namespaces and targets are not set together. if len(cfg.EnabledNamespaces) > 0 && len(cfg.Targets) > 0 { - return nil, fmt.Errorf("apm_config.instrumentation.enabled_namespaces and apm_config.instrumentation.targets are mutually exclusive and cannot be set together") + return nil, errors.New("apm_config.instrumentation.enabled_namespaces and apm_config.instrumentation.targets are mutually exclusive and cannot be set together") } // Ensure both library versions and targets are not set together. if len(cfg.LibVersions) > 0 && len(cfg.Targets) > 0 { - return nil, fmt.Errorf("apm_config.instrumentation.lib_versions and apm_config.instrumentation.targets are mutually exclusive and cannot be set together") + return nil, errors.New("apm_config.instrumentation.lib_versions and apm_config.instrumentation.targets are mutually exclusive and cannot be set together") } // Ensure both namespace names and labels are not set together. 
for _, target := range cfg.Targets { if target.NamespaceSelector != nil && len(target.NamespaceSelector.MatchNames) > 0 && (len(target.NamespaceSelector.MatchLabels) > 0 || len(target.NamespaceSelector.MatchExpressions) > 0) { - return nil, fmt.Errorf("apm_config.instrumentation.targets[].namespaceSelector.matchNames and apm_config.instrumentation.targets[].namespaceSelector.matchLabels/matchExpressions are mutually exclusive and cannot be set together") + return nil, errors.New("apm_config.instrumentation.targets[].namespaceSelector.matchNames and apm_config.instrumentation.targets[].namespaceSelector.matchLabels/matchExpressions are mutually exclusive and cannot be set together") } } diff --git a/pkg/clusteragent/admission/mutate/common/common.go b/pkg/clusteragent/admission/mutate/common/common.go index dc631a6b015706..7b1da9f1a46d5c 100644 --- a/pkg/clusteragent/admission/mutate/common/common.go +++ b/pkg/clusteragent/admission/mutate/common/common.go @@ -176,7 +176,7 @@ func InjectVolume(pod *corev1.Pod, volume corev1.Volume, volumeMount corev1.Volu // PodString returns a string that helps identify the pod func PodString(pod *corev1.Pod) string { if pod.GetNamespace() == "" || pod.GetName() == "" { - return fmt.Sprintf("with generate name %s", pod.GetGenerateName()) + return "with generate name " + pod.GetGenerateName() } return fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName()) } diff --git a/pkg/clusteragent/admission/mutate/common/filter.go b/pkg/clusteragent/admission/mutate/common/filter.go index 3e362c1a730cc0..22850d43af1689 100644 --- a/pkg/clusteragent/admission/mutate/common/filter.go +++ b/pkg/clusteragent/admission/mutate/common/filter.go @@ -8,6 +8,7 @@ package common import ( + "errors" "fmt" corev1 "k8s.io/api/core/v1" @@ -98,7 +99,7 @@ func (f *DefaultFilter) IsNamespaceEligible(ns string) bool { // - Enabled and disabled namespaces: return error. 
func makeNamespaceFilter(enabledNamespaces, disabledNamespaces []string) (*containers.Filter, error) { if len(enabledNamespaces) > 0 && len(disabledNamespaces) > 0 { - return nil, fmt.Errorf("enabled_namespaces and disabled_namespaces configuration cannot be set together") + return nil, errors.New("enabled_namespaces and disabled_namespaces configuration cannot be set together") } // Prefix the namespaces as needed by the containers.Filter. diff --git a/pkg/clusteragent/admission/mutate/common/test_utils.go b/pkg/clusteragent/admission/mutate/common/test_utils.go index 210f8957d68d8d..cfc1c8657869d5 100644 --- a/pkg/clusteragent/admission/mutate/common/test_utils.go +++ b/pkg/clusteragent/admission/mutate/common/test_utils.go @@ -10,6 +10,7 @@ package common import ( "context" "encoding/json" + "errors" "fmt" "testing" @@ -337,7 +338,7 @@ type MockMutator struct { func (m *MockMutator) MutatePod(_ *corev1.Pod, _ string, _ dynamic.Interface) (bool, error) { m.Called = true if m.ShoudErr { - return false, fmt.Errorf("error") + return false, errors.New("error") } return m.ShouldMutate, nil diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go index 94d31b87cb5e50..35618635eb5b73 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go @@ -351,7 +351,7 @@ func NewCWSInstrumentation(wmeta workloadmeta.Component, datadogConfig config.Co cwsInjectorContainerRegistry := mutatecommon.ContainerRegistry(datadogConfig, "admission_controller.cws_instrumentation.container_registry") if len(cwsInjectorImageName) == 0 { - return nil, fmt.Errorf("can't initialize CWS Instrumentation without an image_name") + return nil, errors.New("can't initialize CWS Instrumentation without an image_name") } if len(cwsInjectorImageTag) == 0 { cwsInjectorImageTag = "latest" 
diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go index acf76d1e415014..9d41839c58bc1e 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go @@ -10,6 +10,7 @@ package cwsinstrumentation import ( "context" "encoding/json" + "errors" "fmt" "path/filepath" "strings" @@ -85,7 +86,7 @@ type MockV1PodsGetter struct { // Get looks up a pod based on user input func (mvpg *MockV1PodsGetter) Get(_ context.Context, _ string, _ metav1.GetOptions) (*corev1.Pod, error) { if mvpg.shouldFail { - return nil, fmt.Errorf("mocked V1PodsGetter error") + return nil, errors.New("mocked V1PodsGetter error") } return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -593,7 +594,7 @@ func Test_injectCWSPodInstrumentation(t *testing.T) { }, expectedInitContainer: corev1.Container{ Name: cwsInjectorInitContainerName, - Image: fmt.Sprintf("%s/my-image:latest", commonRegistry), + Image: commonRegistry + "/my-image:latest", Command: []string{"/cws-instrumentation", "setup", "--cws-volume-mount", cwsMountPath}, VolumeMounts: []corev1.VolumeMount{ { @@ -617,7 +618,7 @@ func Test_injectCWSPodInstrumentation(t *testing.T) { }, expectedInitContainer: corev1.Container{ Name: cwsInjectorInitContainerName, - Image: fmt.Sprintf("%s/my-image:my-tag", commonRegistry), + Image: commonRegistry + "/my-image:my-tag", Command: []string{"/cws-instrumentation", "setup", "--cws-volume-mount", cwsMountPath}, VolumeMounts: []corev1.VolumeMount{ { @@ -677,7 +678,7 @@ func Test_injectCWSPodInstrumentation(t *testing.T) { }, expectedInitContainer: corev1.Container{ Name: cwsInjectorInitContainerName, - Image: fmt.Sprintf("%s/my-image:latest", commonRegistry), + Image: commonRegistry + "/my-image:latest", Command: []string{"/cws-instrumentation", "setup", 
"--cws-volume-mount", cwsMountPath}, VolumeMounts: []corev1.VolumeMount{ { @@ -741,7 +742,7 @@ func Test_injectCWSPodInstrumentation(t *testing.T) { }, expectedInitContainer: corev1.Container{ Name: cwsInjectorInitContainerName, - Image: fmt.Sprintf("%s/my-image:latest", commonRegistry), + Image: commonRegistry + "/my-image:latest", Command: []string{"/cws-instrumentation", "setup", "--cws-volume-mount", cwsMountPath}, VolumeMounts: []corev1.VolumeMount{ { @@ -783,7 +784,7 @@ func Test_injectCWSPodInstrumentation(t *testing.T) { }, expectedInitContainer: corev1.Container{ Name: cwsInjectorInitContainerName, - Image: fmt.Sprintf("%s/my-image:latest", commonRegistry), + Image: commonRegistry + "/my-image:latest", Command: []string{"/cws-instrumentation", "setup", "--cws-volume-mount", cwsMountPath}, VolumeMounts: []corev1.VolumeMount{ { diff --git a/pkg/clusteragent/admission/mutate/tagsfromlabels/owner.go b/pkg/clusteragent/admission/mutate/tagsfromlabels/owner.go index 58de29d92a7803..39d101fc152bff 100644 --- a/pkg/clusteragent/admission/mutate/tagsfromlabels/owner.go +++ b/pkg/clusteragent/admission/mutate/tagsfromlabels/owner.go @@ -48,7 +48,7 @@ func getOwnerInfo(owner metav1.OwnerReference) (*ownerInfo, error) { return nil, err } return &ownerInfo{ - gvr: gv.WithResource(fmt.Sprintf("%ss", strings.ToLower(owner.Kind))), + gvr: gv.WithResource(strings.ToLower(owner.Kind) + "s"), name: owner.Name, }, nil } diff --git a/pkg/clusteragent/admission/patch/patcher.go b/pkg/clusteragent/admission/patch/patcher.go index e458d8d400ce97..995d59db3ab5e6 100644 --- a/pkg/clusteragent/admission/patch/patcher.go +++ b/pkg/clusteragent/admission/patch/patcher.go @@ -12,6 +12,7 @@ import ( "context" "encoding/json" "fmt" + "strconv" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" @@ -72,7 +73,7 @@ func (p *patcher) patchDeployment(req Request) error { if err != nil { return 
fmt.Errorf("failed to encode object: %v", err) } - revision := fmt.Sprint(req.Revision) + revision := strconv.FormatInt(req.Revision, 10) if deploy.Annotations == nil { deploy.Annotations = make(map[string]string) } @@ -131,7 +132,7 @@ func enableConfig(deploy *corev1.Deployment, req Request) error { configAnnotKey := fmt.Sprintf(common.LibConfigV1AnnotKeyFormat, req.LibConfig.Language) deploy.Spec.Template.Annotations[configAnnotKey] = string(conf) deploy.Spec.Template.Annotations[k8sutil.RcIDAnnotKey] = req.ID - deploy.Spec.Template.Annotations[k8sutil.RcRevisionAnnotKey] = fmt.Sprint(req.Revision) + deploy.Spec.Template.Annotations[k8sutil.RcRevisionAnnotKey] = strconv.FormatInt(req.Revision, 10) return nil } @@ -152,5 +153,5 @@ func disableConfig(deploy *corev1.Deployment, req Request) { configAnnotKey := fmt.Sprintf(common.LibConfigV1AnnotKeyFormat, req.LibConfig.Language) delete(deploy.Spec.Template.Annotations, configAnnotKey) deploy.Spec.Template.Annotations[k8sutil.RcIDAnnotKey] = req.ID - deploy.Spec.Template.Annotations[k8sutil.RcRevisionAnnotKey] = fmt.Sprint(req.Revision) + deploy.Spec.Template.Annotations[k8sutil.RcRevisionAnnotKey] = strconv.FormatInt(req.Revision, 10) } diff --git a/pkg/clusteragent/admission/status.go b/pkg/clusteragent/admission/status.go index a9a326c4ceb7ab..9c517dd3bc7847 100644 --- a/pkg/clusteragent/admission/status.go +++ b/pkg/clusteragent/admission/status.go @@ -10,6 +10,7 @@ package admission import ( "context" "embed" + "errors" "fmt" "hash/fnv" "io" @@ -65,11 +66,11 @@ func GetStatus(apiCl kubernetes.Interface) map[string]interface{} { } var getValidatingWebhookStatus = func(string, kubernetes.Interface) (map[string]interface{}, error) { - return nil, fmt.Errorf("admission controller not started") + return nil, errors.New("admission controller not started") } var getMutatingWebhookStatus = func(string, kubernetes.Interface) (map[string]interface{}, error) { - return nil, fmt.Errorf("admission controller not started") + 
return nil, errors.New("admission controller not started") } func getValidatingWebhookStatusV1(name string, apiCl kubernetes.Interface) (map[string]interface{}, error) { @@ -93,7 +94,7 @@ func getValidatingWebhookStatusV1(name string, apiCl kubernetes.Interface) (map[ port = fmt.Sprintf("Port: %d", *svc.Port) } if svc.Path != nil { - path = fmt.Sprintf("Path: %s", *svc.Path) + path = "Path: " + *svc.Path } validatingWebhooksConfig[w.Name]["Service"] = fmt.Sprintf("%s/%s - %s - %s", svc.Namespace, svc.Name, port, path) } @@ -130,7 +131,7 @@ func getValidatingWebhookStatusV1beta1(name string, apiCl kubernetes.Interface) port = fmt.Sprintf("Port: %d", *svc.Port) } if svc.Path != nil { - path = fmt.Sprintf("Path: %s", *svc.Path) + path = "Path: " + *svc.Path } validatingWebhooksConfig[w.Name]["Service"] = fmt.Sprintf("%s/%s - %s - %s", svc.Namespace, svc.Name, port, path) } @@ -167,7 +168,7 @@ func getMutatingWebhookStatusV1(name string, apiCl kubernetes.Interface) (map[st port = fmt.Sprintf("Port: %d", *svc.Port) } if svc.Path != nil { - path = fmt.Sprintf("Path: %s", *svc.Path) + path = "Path: " + *svc.Path } mutatingWebhooksConfig[w.Name]["Service"] = fmt.Sprintf("%s/%s - %s - %s", svc.Namespace, svc.Name, port, path) } @@ -203,7 +204,7 @@ func getMutatingWebhookStatusV1beta1(name string, apiCl kubernetes.Interface) (m port = fmt.Sprintf("Port: %d", *svc.Port) } if svc.Path != nil { - path = fmt.Sprintf("Path: %s", *svc.Path) + path = "Path: " + *svc.Path } mutatingWebhooksConfig[w.Name]["Service"] = fmt.Sprintf("%s/%s - %s - %s", svc.Namespace, svc.Name, port, path) } diff --git a/pkg/clusteragent/api/leader_forwarder.go b/pkg/clusteragent/api/leader_forwarder.go index df01c778a26ea2..8440bf609526de 100644 --- a/pkg/clusteragent/api/leader_forwarder.go +++ b/pkg/clusteragent/api/leader_forwarder.go @@ -9,7 +9,6 @@ package api import ( "crypto/tls" - "fmt" stdLog "log" "net" "net/http" @@ -87,7 +86,7 @@ func (lf *LeaderForwarder) Forward(rw http.ResponseWriter, req 
*http.Request) { rw.Header().Set(respForwarded, "true") if req.Header.Get(forwardHeader) != "" { - http.Error(rw, fmt.Sprintf("Query was already forwarded from: %s", req.RemoteAddr), http.StatusLoopDetected) + http.Error(rw, "Query was already forwarded from: "+req.RemoteAddr, http.StatusLoopDetected) } var currentProxy *httputil.ReverseProxy diff --git a/pkg/clusteragent/appsec/injector.go b/pkg/clusteragent/appsec/injector.go index 7e84c7d9e98263..9e8d2f7158b8de 100644 --- a/pkg/clusteragent/appsec/injector.go +++ b/pkg/clusteragent/appsec/injector.go @@ -9,7 +9,7 @@ package appsec import ( "context" - "fmt" + "errors" "maps" "slices" "strconv" @@ -47,7 +47,7 @@ type leaderNotifier func() (<-chan struct{}, func() bool) // Start initializes and starts the proxy injector func Start(ctx context.Context, logger log.Component, datadogConfig config.Component, leaderSub leaderNotifier) error { if injector != nil { - return fmt.Errorf("can't start proxy injection twice") + return errors.New("can't start proxy injection twice") } injectorStartOnce.Do(func() { diff --git a/pkg/clusteragent/appsec/istio/istio.go b/pkg/clusteragent/appsec/istio/istio.go index 6471312bcce530..ee71d739bf5841 100644 --- a/pkg/clusteragent/appsec/istio/istio.go +++ b/pkg/clusteragent/appsec/istio/istio.go @@ -10,6 +10,7 @@ package istio import ( "context" + "errors" "fmt" log "github.com/DataDog/datadog-agent/comp/core/log/def" @@ -19,7 +20,7 @@ import ( istionetworkingv1alpha3 "istio.io/api/networking/v1alpha3" istiov1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -55,7 +56,7 @@ func (i *istioInjectionPattern) IsInjectionPossible(ctx context.Context) error { // Check if the EnvoyFilter CRD is present _, err := 
i.client.Resource(crdGVR).Get(ctx, gvrToName(filterGVR), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { return fmt.Errorf("%w: EnvoyExtensionPolicy CRD not found, is the Istio CRDs installed in the cluster? Cannot enable appsec proxy injection for istio", err) } @@ -65,7 +66,7 @@ func (i *istioInjectionPattern) IsInjectionPossible(ctx context.Context) error { // Check if the Gateway CRDs is present _, err = i.client.Resource(crdGVR).Get(ctx, gvrToName(gatewayGVR), metav1.GetOptions{}) - if errors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { return fmt.Errorf("%w: Gateway CRD not found, are the Istio CRDs installed in the cluster? Cannot enable appsec proxy injection for istio", err) } @@ -88,7 +89,7 @@ func (i *istioInjectionPattern) Added(ctx context.Context, obj *unstructured.Uns controllerName, found, err := unstructured.NestedString(obj.UnstructuredContent(), "spec", "controllerName") if err != nil || !found { if err == nil { - err = fmt.Errorf("controllerName not found in gateway spec") + err = errors.New("controllerName not found in gateway spec") } return fmt.Errorf("could not get gateway controller name: %w", err) } @@ -106,7 +107,7 @@ func (i *istioInjectionPattern) Added(ctx context.Context, obj *unstructured.Uns return nil } - if !errors.IsNotFound(err) { + if !k8serrors.IsNotFound(err) { return fmt.Errorf("could not check if Envoy Filter already exists: %w", err) } @@ -123,7 +124,7 @@ func (i *istioInjectionPattern) Deleted(ctx context.Context, obj *unstructured.U controllerName, found, err := unstructured.NestedString(obj.UnstructuredContent(), "spec", "controllerName") if err != nil || !found { if err == nil { - err = fmt.Errorf("controllerName not found in gateway spec") + err = errors.New("controllerName not found in gateway spec") } return fmt.Errorf("could not get gateway controller name: %w", err) } @@ -136,7 +137,7 @@ func (i *istioInjectionPattern) Deleted(ctx context.Context, obj *unstructured.U name 
:= obj.GetName() i.logger.Debugf("Processing deleted gatewayclass for istio: %s", name) _, err = i.client.Resource(filterGVR).Namespace(namespace).Get(ctx, envoyFilterName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { i.logger.Debug("Envoy Filter already deleted") return nil } @@ -148,7 +149,7 @@ func (i *istioInjectionPattern) Deleted(ctx context.Context, obj *unstructured.U err = i.client.Resource(filterGVR). Namespace(namespace). Delete(ctx, envoyFilterName, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { i.logger.Debug("Envoy Filter already deleted") err = nil } @@ -174,7 +175,7 @@ func (i *istioInjectionPattern) createEnvoyFilter(ctx context.Context, namespace _, err = i.client.Resource(filterGVR). Namespace(namespace). Create(ctx, &unstructured.Unstructured{Object: unstructuredFilter}, metav1.CreateOptions{}) - if errors.IsAlreadyExists(err) { + if k8serrors.IsAlreadyExists(err) { i.logger.Debug("Envoy Filter already exists") return nil } diff --git a/pkg/clusteragent/autoscaling/cluster/controller.go b/pkg/clusteragent/autoscaling/cluster/controller.go index 547c435d3582fc..523f1d6e4ee514 100644 --- a/pkg/clusteragent/autoscaling/cluster/controller.go +++ b/pkg/clusteragent/autoscaling/cluster/controller.go @@ -10,6 +10,7 @@ package cluster import ( "context" "encoding/json" + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" @@ -177,7 +178,7 @@ func (c *Controller) createNodePool(ctx context.Context, npi model.NodePoolInter } if len(ncList.Items) == 0 { - return fmt.Errorf("no NodeClasses found, NodePool cannot be created") + return errors.New("no NodeClasses found, NodePool cannot be created") } if len(ncList.Items) > 1 { diff --git a/pkg/clusteragent/autoscaling/cluster/provider.go b/pkg/clusteragent/autoscaling/cluster/provider.go index af931d899cac24..721da16d7e16da 100644 --- a/pkg/clusteragent/autoscaling/cluster/provider.go +++ 
b/pkg/clusteragent/autoscaling/cluster/provider.go @@ -9,6 +9,7 @@ package cluster import ( "context" + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" @@ -32,7 +33,7 @@ func StartClusterAutoscaling( senderManager sender.SenderManager, ) error { if apiCl == nil { - return fmt.Errorf("Impossible to start cluster autoscaling without valid APIClient") + return errors.New("Impossible to start cluster autoscaling without valid APIClient") } eventBroadcaster := record.NewBroadcaster() diff --git a/pkg/clusteragent/autoscaling/custommetrics/provider.go b/pkg/clusteragent/autoscaling/custommetrics/provider.go index 42999a82280d2b..893e7daa4b0a1d 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/provider.go +++ b/pkg/clusteragent/autoscaling/custommetrics/provider.go @@ -9,6 +9,7 @@ package custommetrics import ( "context" + "errors" "fmt" "strings" "time" @@ -128,7 +129,7 @@ func (p *datadogProvider) externalMetricsSetter(ctx context.Context) { // GetExternalMetric is called by the Autoscaler Controller to get the value of the external metric it is currently evaluating. 
func (p *datadogProvider) GetExternalMetric(_ context.Context, _ string, metricSelector labels.Selector, info provider.ExternalMetricInfo) (*external_metrics.ExternalMetricValueList, error) { if !p.isServing || time.Now().Unix()-p.timestamp > 2*p.maxAge { - return nil, fmt.Errorf("external metrics invalid") + return nil, errors.New("external metrics invalid") } matchingMetrics := []external_metrics.ExternalMetricValue{} diff --git a/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go b/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go index 9dfbe88ae11d89..02d7043b45e21f 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go +++ b/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go @@ -10,7 +10,7 @@ package custommetrics import ( "context" "encoding/json" - "fmt" + "errors" "sync" "time" @@ -19,14 +19,14 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" ) var ( - errNotInitialized = fmt.Errorf("configmap not initialized") + errNotInitialized = errors.New("configmap not initialized") ) // configMapStore provides persistent storage of custom and external metrics using a configmap. 
@@ -57,7 +57,7 @@ func NewConfigMapStore(client kubernetes.Interface, ns, name string) (Store, err return store, nil } - if !errors.IsNotFound(err) { + if !k8serrors.IsNotFound(err) { log.Infof("Error while attempting to fetch the configmap %s: %v", name, err) return nil, err } diff --git a/pkg/clusteragent/autoscaling/externalmetrics/autoscaler_watcher.go b/pkg/clusteragent/autoscaling/externalmetrics/autoscaler_watcher.go index c5f328a6ceb4f1..bed5879de4a5c9 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/autoscaler_watcher.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/autoscaler_watcher.go @@ -8,6 +8,7 @@ package externalmetrics import ( + "errors" "fmt" "strings" "time" @@ -80,12 +81,12 @@ func NewAutoscalerWatcher( store *DatadogMetricsInternalStore, ) (*AutoscalerWatcher, error) { if store == nil { - return nil, fmt.Errorf("Store must be initialized") + return nil, errors.New("Store must be initialized") } // Check that we have at least one valid resource to watch if informer == nil && wpaInformer == nil { - return nil, fmt.Errorf("Must enable at least HPA or WPA") + return nil, errors.New("Must enable at least HPA or WPA") } // Setup HPA diff --git a/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller.go b/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller.go index 1b9458269937ca..3d8f5502aafa49 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller.go @@ -9,6 +9,7 @@ package externalmetrics import ( "context" + "errors" "fmt" "time" @@ -18,7 +19,7 @@ import ( datadoghq "github.com/DataDog/datadog-operator/api/datadoghq/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" @@ -66,7 +67,7 @@ type DatadogMetricController struct { // 
NewDatadogMetricController returns a new AutoscalersController func NewDatadogMetricController(client dynamic.Interface, informer dynamicinformer.DynamicSharedInformerFactory, isLeader func() bool, store *DatadogMetricsInternalStore) (*DatadogMetricController, error) { if store == nil { - return nil, fmt.Errorf("Store must be initialized") + return nil, errors.New("Store must be initialized") } datadogMetricsInformer := informer.ForResource(gvrDDM) @@ -200,7 +201,7 @@ func (c *DatadogMetricController) processDatadogMetric(workerID int, key interfa } switch { - case errors.IsNotFound(err): + case k8serrors.IsNotFound(err): // We ignore not found here as we may need to create a DatadogMetric later datadogMetricCached = nil case err != nil: diff --git a/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller_test.go b/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller_test.go index 97f6e385bbb403..baed7e98089d58 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller_test.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/datadogmetric_controller_test.go @@ -8,7 +8,7 @@ package externalmetrics import ( - "fmt" + "errors" "testing" "time" @@ -298,7 +298,7 @@ func TestLeaderUpdateFromStoreAfterInitial(t *testing.T) { Value: 10.0, UpdateTime: updateTime, DataTime: updateTime, - Error: fmt.Errorf("Error from backend while fetching metric"), + Error: errors.New("Error from backend while fetching metric"), } ddm.SetQueries("metric query0") f.store.Set("default/dd-metric-0", ddm, "utest") @@ -383,7 +383,7 @@ func TestLeaderNoUpdate(t *testing.T) { Value: 10.0, UpdateTime: updateTime, DataTime: updateTime, - Error: fmt.Errorf("Error from backend while fetching metric"), + Error: errors.New("Error from backend while fetching metric"), } ddm.SetQueries("metric query0") f.store.Set("default/dd-metric-0", ddm, "utest") @@ -491,7 +491,7 @@ func TestCreateDatadogMetric(t *testing.T) { // Test create autogen without 
ExternalMetricName f.actions = nil - f.runControllerSync(true, "default/dd-metric-2", fmt.Errorf("Unable to create autogen DatadogMetric default/dd-metric-2 without ExternalMetricName")) + f.runControllerSync(true, "default/dd-metric-2", errors.New("Unable to create autogen DatadogMetric default/dd-metric-2 without ExternalMetricName")) assert.Empty(t, f.actions) } @@ -723,7 +723,7 @@ func TestFollower(t *testing.T) { Value: 20.0, UpdateTime: kubernetes.TimeWithoutWall(updateTime), DataTime: kubernetes.TimeWithoutWall(updateTime), - Error: fmt.Errorf("Error from backend while fetching metric"), + Error: errors.New("Error from backend while fetching metric"), } ddm.SetQueries("metric query0") f.store.Set("default/dd-metric-0", ddm, "utest") diff --git a/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_backoff_test.go b/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_backoff_test.go index 6b691792ff36f8..7b19df1917ccb5 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_backoff_test.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_backoff_test.go @@ -745,7 +745,7 @@ func TestRetrieveMetricsBatchErrorCasesWithBackoff(t *testing.T) { Value: 2.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: fmt.Errorf("Backend error 400"), + Error: errors.New("Backend error 400"), Retries: 1, }, query: "query-metric1", @@ -820,7 +820,7 @@ func TestRetrieveMetricsBatchErrorCasesWithBackoff(t *testing.T) { Value: 2.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: fmt.Errorf("Backend error 400"), + Error: errors.New("Backend error 400"), Retries: 1, }, query: "query-metric1", @@ -895,7 +895,7 @@ func TestRetrieveMetricsBatchErrorCasesWithBackoff(t *testing.T) { Value: 2.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: fmt.Errorf("Backend error 500"), + Error: errors.New("Backend error 500"), Retries: 1, }, query: "query-metric1", @@ -907,7 +907,7 @@ func 
TestRetrieveMetricsBatchErrorCasesWithBackoff(t *testing.T) { Value: 3.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: fmt.Errorf("Backend error 500"), + Error: errors.New("Backend error 500"), Retries: 1, }, query: "query-metric2", @@ -999,7 +999,7 @@ func TestRetrieveMetricsBatchErrorCasesWithBackoff(t *testing.T) { Value: 2.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: fmt.Errorf("Backend error 500"), + Error: errors.New("Backend error 500"), Retries: 1, }, query: "query-metric1", @@ -1379,7 +1379,7 @@ func Test429TooManyRequestsErrorHandling(t *testing.T) { Value: 2.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: fmt.Errorf("Backend error 500"), // Individual error + Error: errors.New("Backend error 500"), // Individual error }, query: "query-metric1", }, diff --git a/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_test.go b/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_test.go index 2f0d42e45205a5..7adb26b523af45 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_test.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/metrics_retriever_test.go @@ -418,7 +418,7 @@ func TestRetrieveMetricsErrorCases(t *testing.T) { Value: 1.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: newBatchError(fmt.Errorf("Backend error 500"), time.Time{}), + Error: newBatchError(errors.New("Backend error 500"), time.Time{}), }, query: "query-metric0", }, @@ -429,7 +429,7 @@ func TestRetrieveMetricsErrorCases(t *testing.T) { Value: 2.0, DataTime: defaultPreviousUpdateTime, Valid: false, - Error: newBatchError(fmt.Errorf("Backend error 500"), time.Time{}), + Error: newBatchError(errors.New("Backend error 500"), time.Time{}), }, query: "query-metric1", }, diff --git a/pkg/clusteragent/autoscaling/externalmetrics/model/datadogmetricinternal.go b/pkg/clusteragent/autoscaling/externalmetrics/model/datadogmetricinternal.go index 062bd8cfde2408..005e0eed166454 100644 --- 
a/pkg/clusteragent/autoscaling/externalmetrics/model/datadogmetricinternal.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/model/datadogmetricinternal.go @@ -268,7 +268,7 @@ func (d *DatadogMetricInternal) ToExternalMetricFormat(externalMetricName string if d.Error != nil { return nil, d.Error } - return nil, fmt.Errorf("DatadogMetric is invalid, missing error details") + return nil, errors.New("DatadogMetric is invalid, missing error details") } if d.IsStale(metricsMaxAge, time.UTC().Unix(), gracePeriod) { diff --git a/pkg/clusteragent/autoscaling/externalmetrics/provider.go b/pkg/clusteragent/autoscaling/externalmetrics/provider.go index cb00178fe36d37..3865de93741f6e 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/provider.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/provider.go @@ -9,6 +9,7 @@ package externalmetrics import ( "context" + "errors" "fmt" "math" "strings" @@ -51,7 +52,7 @@ var ( // NewDatadogMetricProvider configures and returns a new datadogMetricProvider func NewDatadogMetricProvider(ctx context.Context, apiCl *apiserver.APIClient, datadogClient datadogclient.Component) (provider.ExternalMetricsProvider, error) { if apiCl == nil { - return nil, fmt.Errorf("Impossible to create DatadogMetricProvider without valid APIClient") + return nil, errors.New("Impossible to create DatadogMetricProvider without valid APIClient") } le, err := leaderelection.GetLeaderEngine() diff --git a/pkg/clusteragent/autoscaling/externalmetrics/provider_test.go b/pkg/clusteragent/autoscaling/externalmetrics/provider_test.go index 649ac52702e2da..ba52253f9aa0ac 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/provider_test.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/provider_test.go @@ -8,6 +8,7 @@ package externalmetrics import ( + "errors" "fmt" "testing" "time" @@ -130,7 +131,7 @@ func TestGetExternalMetrics(t *testing.T) { ID: "ns/metric0", DataTime: defaultUpdateTime, Valid: false, - Error: fmt.Errorf("Some error"), + 
Error: errors.New("Some error"), Value: 42.0, }, query: "query-metric0", @@ -138,7 +139,7 @@ func TestGetExternalMetrics(t *testing.T) { }, queryMetricName: "datadogmetric@ns:metric0", expectedExternalMetrics: nil, - expectedError: fmt.Errorf("Some error"), + expectedError: errors.New("Some error"), }, { desc: "Test DatadogMetric is invalid, no error", @@ -155,7 +156,7 @@ func TestGetExternalMetrics(t *testing.T) { }, queryMetricName: "datadogmetric@ns:metric0", expectedExternalMetrics: nil, - expectedError: fmt.Errorf("DatadogMetric is invalid, missing error details"), + expectedError: errors.New("DatadogMetric is invalid, missing error details"), }, { desc: "Test DatadogMetric not found", @@ -173,7 +174,7 @@ func TestGetExternalMetrics(t *testing.T) { }, queryMetricName: "datadogmetric@ns:metric1", expectedExternalMetrics: nil, - expectedError: fmt.Errorf("DatadogMetric not found for metric name: datadogmetric@ns:metric1, datadogmetricid: ns/metric1"), + expectedError: errors.New("DatadogMetric not found for metric name: datadogmetric@ns:metric1, datadogmetricid: ns/metric1"), }, { desc: "Test DatadogMetric not found", @@ -191,7 +192,7 @@ func TestGetExternalMetrics(t *testing.T) { }, queryMetricName: "datadogmetric@ns:metric1", expectedExternalMetrics: nil, - expectedError: fmt.Errorf("DatadogMetric not found for metric name: datadogmetric@ns:metric1, datadogmetricid: ns/metric1"), + expectedError: errors.New("DatadogMetric not found for metric name: datadogmetric@ns:metric1, datadogmetricid: ns/metric1"), }, { desc: "Test ExternalMetric use wrong DatadogMetric format", @@ -209,7 +210,7 @@ func TestGetExternalMetrics(t *testing.T) { }, queryMetricName: "datadogmetric@metric1", expectedExternalMetrics: nil, - expectedError: fmt.Errorf("ExternalMetric does not follow DatadogMetric format: datadogmetric@metric1"), + expectedError: errors.New("ExternalMetric does not follow DatadogMetric format: datadogmetric@metric1"), }, { desc: "Test ExternalMetric does not use 
DatadogMetric format", @@ -227,7 +228,7 @@ func TestGetExternalMetrics(t *testing.T) { }, queryMetricName: "nginx.net.request_per_s", expectedExternalMetrics: nil, - expectedError: fmt.Errorf("DatadogMetric not found for metric name: nginx.net.request_per_s, datadogmetricid: default/dcaautogen-32402d8dfc05cf540928a606d78ed68c0607f7"), + expectedError: errors.New("DatadogMetric not found for metric name: nginx.net.request_per_s, datadogmetricid: default/dcaautogen-32402d8dfc05cf540928a606d78ed68c0607f7"), }, } diff --git a/pkg/clusteragent/autoscaling/externalmetrics/utils.go b/pkg/clusteragent/autoscaling/externalmetrics/utils.go index bc6f83a5c0278e..6a3f4fc270fa25 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/utils.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/utils.go @@ -78,7 +78,7 @@ func buildDatadogQueryForExternalMetric(metricName string, labels map[string]str var result string if len(labels) == 0 { - result = fmt.Sprintf("%s{*}", metricName) + result = metricName + "{*}" } else { datadogTags := []string{} for key, val := range labels { diff --git a/pkg/clusteragent/autoscaling/workload/config_retriever_values.go b/pkg/clusteragent/autoscaling/workload/config_retriever_values.go index a97ae99c6ca360..d22e7a90af21ed 100644 --- a/pkg/clusteragent/autoscaling/workload/config_retriever_values.go +++ b/pkg/clusteragent/autoscaling/workload/config_retriever_values.go @@ -9,6 +9,7 @@ package workload import ( "encoding/json" + "errors" "fmt" "sync" "time" @@ -226,7 +227,7 @@ func parseHorizontalScalingData(timestamp time.Time, data *kubeAutoscaling.Workl if data.Replicas != nil { horizontalValues.Replicas = *data.Replicas } else { - return nil, fmt.Errorf("horizontal replicas value are missing") + return nil, errors.New("horizontal replicas value are missing") } return horizontalValues, nil diff --git a/pkg/clusteragent/autoscaling/workload/controller.go b/pkg/clusteragent/autoscaling/workload/controller.go index a7d1f312678d35..355a169b5a5182 
100644 --- a/pkg/clusteragent/autoscaling/workload/controller.go +++ b/pkg/clusteragent/autoscaling/workload/controller.go @@ -9,10 +9,11 @@ package workload import ( "context" + "errors" "fmt" "time" - "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -162,13 +163,13 @@ func (c *Controller) processPodAutoscaler(ctx context.Context, key, ns, name str } switch { - case errors.IsNotFound(err): + case k8serrors.IsNotFound(err): // We ignore not found here as we may need to create a DatadogPodAutoscaler later podAutoscaler = nil case err != nil: return autoscaling.Requeue, fmt.Errorf("Unable to retrieve DatadogPodAutoscaler: %w", err) case podAutoscalerCachedObj == nil: - return autoscaling.Requeue, fmt.Errorf("Could not parse empty DatadogPodAutoscaler from local cache") + return autoscaling.Requeue, errors.New("Could not parse empty DatadogPodAutoscaler from local cache") } // No error path, check what to do with this event @@ -243,7 +244,7 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string log.Infof("Remote owned PodAutoscaler with Deleted flag, deleting object: %s", key) err := c.deletePodAutoscaler(ns, name) // In case of not found, it means the object is gone but informer cache is not updated yet, we can safely delete it from our store - if err != nil && errors.IsNotFound(err) { + if err != nil && k8serrors.IsNotFound(err) { log.Debugf("Object %s not found in Kubernetes during deletion, clearing internal store", key) c.store.UnlockDelete(key, c.ID) return autoscaling.NoRequeue, nil @@ -489,7 +490,7 @@ func (c *Controller) validateAutoscaler(podAutoscalerInternal model.PodAutoscale clusterAgentNs := common.GetMyNamespace() if podAutoscalerInternal.Namespace() == clusterAgentNs && podAutoscalerInternal.Spec().TargetRef.Name == resourceName { - return 
fmt.Errorf("Autoscaling target cannot be set to the cluster agent") + return errors.New("Autoscaling target cannot be set to the cluster agent") } if err := validateAutoscalerObjectives(podAutoscalerInternal.Spec()); err != nil { return err @@ -501,7 +502,7 @@ func validateAutoscalerObjectives(spec *datadoghq.DatadogPodAutoscalerSpec) erro if spec.Fallback != nil && len(spec.Fallback.Horizontal.Objectives) > 0 { for _, objective := range spec.Fallback.Horizontal.Objectives { if objective.Type == datadoghqcommon.DatadogPodAutoscalerCustomQueryObjectiveType { - return fmt.Errorf("Autoscaler fallback cannot be based on custom query objective") + return errors.New("Autoscaler fallback cannot be based on custom query objective") } } } @@ -510,7 +511,7 @@ func validateAutoscalerObjectives(spec *datadoghq.DatadogPodAutoscalerSpec) erro switch objective.Type { case datadoghqcommon.DatadogPodAutoscalerCustomQueryObjectiveType: if objective.CustomQuery == nil { - return fmt.Errorf("Autoscaler objective type is custom query but customQueryObjective is nil") + return errors.New("Autoscaler objective type is custom query but customQueryObjective is nil") } case datadoghqcommon.DatadogPodAutoscalerPodResourceObjectiveType: if objective.PodResource == nil { diff --git a/pkg/clusteragent/autoscaling/workload/controller_test.go b/pkg/clusteragent/autoscaling/workload/controller_test.go index 3ba90d46013a79..1d8713de95aa4a 100755 --- a/pkg/clusteragent/autoscaling/workload/controller_test.go +++ b/pkg/clusteragent/autoscaling/workload/controller_test.go @@ -435,7 +435,7 @@ func TestDatadogPodAutoscalerTargetingClusterAgentErrors(t *testing.T) { t.Setenv("DD_POD_NAME", "datadog-agent-cluster-agent-7dbf798595-tp9lg") currentNs := common.GetMyNamespace() - id := fmt.Sprintf("%s/dpa-dca", currentNs) + id := currentNs + "/dpa-dca" dpaSpec := datadoghq.DatadogPodAutoscalerSpec{ TargetRef: tt.targetRef, @@ -542,9 +542,9 @@ func TestPodAutoscalerLocalOwnerObjectsLimit(t *testing.T) { } 
currentNs := common.GetMyNamespace() - dpaID := fmt.Sprintf("%s/dpa-0", currentNs) - dpa1ID := fmt.Sprintf("%s/dpa-1", currentNs) - dpa2ID := fmt.Sprintf("%s/dpa-2", currentNs) + dpaID := currentNs + "/dpa-0" + dpa1ID := currentNs + "/dpa-1" + dpa2ID := currentNs + "/dpa-2" dpaTime := testTime.Add(-1 * time.Hour) dpa1Time := testTime @@ -1119,7 +1119,7 @@ func TestGetActiveScalingSources(t *testing.T) { Spec: &datadoghq.DatadogPodAutoscalerSpec{}, MainScalingValues: model.ScalingValues{ Horizontal: nil, - HorizontalError: fmt.Errorf("test horizontal error"), + HorizontalError: errors.New("test horizontal error"), Vertical: &model.VerticalScalingValues{ Source: datadoghqcommon.DatadogPodAutoscalerAutoscalingValueSource, }, diff --git a/pkg/clusteragent/autoscaling/workload/controller_vertical.go b/pkg/clusteragent/autoscaling/workload/controller_vertical.go index 56e1aad6e479a9..13270b93c94d9d 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_vertical.go +++ b/pkg/clusteragent/autoscaling/workload/controller_vertical.go @@ -148,7 +148,7 @@ func (u *verticalController) syncDeploymentKind( // Normally we should check updateStrategy here, we currently only support one way, so not required for now. 
// Generate the patch request which adds the scaling hash annotation to the pod template - gvr := targetGVK.GroupVersion().WithResource(fmt.Sprintf("%ss", strings.ToLower(targetGVK.Kind))) + gvr := targetGVK.GroupVersion().WithResource(strings.ToLower(targetGVK.Kind) + "s") patchTime := u.clock.Now() patchData, err := json.Marshal(map[string]interface{}{ "spec": map[string]interface{}{ diff --git a/pkg/clusteragent/autoscaling/workload/dump_test.go b/pkg/clusteragent/autoscaling/workload/dump_test.go index 541decd4180b40..3d836d86756193 100644 --- a/pkg/clusteragent/autoscaling/workload/dump_test.go +++ b/pkg/clusteragent/autoscaling/workload/dump_test.go @@ -10,6 +10,7 @@ package workload import ( "bytes" "encoding/json" + "errors" "fmt" "strings" "testing" @@ -322,9 +323,9 @@ func createFakePodAutoscaler(testTime time.Time) model.FakePodAutoscalerInternal }, }, }, - VerticalError: fmt.Errorf("test vertical error"), + VerticalError: errors.New("test vertical error"), HorizontalError: nil, - Error: fmt.Errorf("test error"), + Error: errors.New("test error"), }, MainScalingValues: model.ScalingValues{ Horizontal: &model.HorizontalScalingValues{ @@ -415,8 +416,8 @@ func createFakePodAutoscaler(testTime time.Time) model.FakePodAutoscalerInternal Version: "1", Type: datadoghqcommon.DatadogPodAutoscalerRolloutTriggeredVerticalActionType, }, - VerticalLastActionError: fmt.Errorf("test vertical last action error"), - Error: fmt.Errorf("test error"), + VerticalLastActionError: errors.New("test vertical last action error"), + Error: errors.New("test error"), } } diff --git a/pkg/clusteragent/autoscaling/workload/external/recommender_client.go b/pkg/clusteragent/autoscaling/workload/external/recommender_client.go index 67e5afc3f289dc..b88ab042da220f 100644 --- a/pkg/clusteragent/autoscaling/workload/external/recommender_client.go +++ b/pkg/clusteragent/autoscaling/workload/external/recommender_client.go @@ -10,6 +10,7 @@ package external import ( "bytes" "context" + "errors" 
"fmt" "io" "net/http" @@ -54,7 +55,7 @@ func newRecommenderClient(ctx context.Context, clock clock.Clock, podWatcher wor func (r *recommenderClient) GetReplicaRecommendation(ctx context.Context, clusterName string, dpa model.PodAutoscalerInternal) (*model.HorizontalScalingValues, error) { recommenderConfig := dpa.CustomRecommenderConfiguration() if recommenderConfig == nil { // should not happen; we should not process autoscalers without recommender config - return nil, fmt.Errorf("external recommender spec is required") + return nil, errors.New("external recommender spec is required") } u, err := url.Parse(recommenderConfig.Endpoint) @@ -63,7 +64,7 @@ func (r *recommenderClient) GetReplicaRecommendation(ctx context.Context, cluste } if u.Scheme != "http" && u.Scheme != "https" { - return nil, fmt.Errorf("only http and https schemes are supported") + return nil, errors.New("only http and https schemes are supported") } req, err := r.buildWorkloadRecommendationRequest(clusterName, dpa, recommenderConfig) diff --git a/pkg/clusteragent/autoscaling/workload/external/recommender_client_testutils.go b/pkg/clusteragent/autoscaling/workload/external/recommender_client_testutils.go index b88d92e17592ca..98cdccbe332395 100644 --- a/pkg/clusteragent/autoscaling/workload/external/recommender_client_testutils.go +++ b/pkg/clusteragent/autoscaling/workload/external/recommender_client_testutils.go @@ -8,8 +8,6 @@ package external import ( - "fmt" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -18,7 +16,7 @@ func newFakeWLMPodEvent(ns, deployment, podName string, containerNames []string) containers := []workloadmeta.OrchestratorContainer{} for _, c := range containerNames { containers = append(containers, workloadmeta.OrchestratorContainer{ - ID: fmt.Sprintf("%s-id", c), + ID: c + "-id", Name: c, Resources: workloadmeta.ContainerResources{ CPURequest: func(f float64) *float64 { return &f 
}(25), // 250m @@ -36,7 +34,7 @@ func newFakeWLMPodEvent(ns, deployment, podName string, containerNames []string) Name: podName, Namespace: ns, }, - Owners: []workloadmeta.KubernetesPodOwner{{Kind: kubernetes.ReplicaSetKind, Name: fmt.Sprintf("%s-766dbb7846", deployment)}}, + Owners: []workloadmeta.KubernetesPodOwner{{Kind: kubernetes.ReplicaSetKind, Name: deployment + "-766dbb7846"}}, Containers: containers, Ready: true, } diff --git a/pkg/clusteragent/autoscaling/workload/external/tls.go b/pkg/clusteragent/autoscaling/workload/external/tls.go index 32ab7976ad2d33..3ee1da8d083417 100644 --- a/pkg/clusteragent/autoscaling/workload/external/tls.go +++ b/pkg/clusteragent/autoscaling/workload/external/tls.go @@ -11,6 +11,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "os" "sync" @@ -59,7 +60,7 @@ func buildTLSConfig(config *TLSFilesConfig) (*tls.Config, error) { } rootCA = x509.NewCertPool() if !rootCA.AppendCertsFromPEM(caPEM) { - return nil, fmt.Errorf("failed to append root CA to pool") + return nil, errors.New("failed to append root CA to pool") } } diff --git a/pkg/clusteragent/autoscaling/workload/local/recommendation_settings.go b/pkg/clusteragent/autoscaling/workload/local/recommendation_settings.go index 0290c951acf12c..addedac0a060bd 100644 --- a/pkg/clusteragent/autoscaling/workload/local/recommendation_settings.go +++ b/pkg/clusteragent/autoscaling/workload/local/recommendation_settings.go @@ -9,6 +9,7 @@ package local import ( + "errors" "fmt" datadoghqcommon "github.com/DataDog/datadog-operator/api/datadoghq/common" @@ -64,7 +65,7 @@ func newResourceRecommenderSettings(objective datadoghqcommon.DatadogPodAutoscal func getOptionsFromPodResource(target *datadoghqcommon.DatadogPodAutoscalerPodResourceObjective) (*resourceRecommenderSettings, error) { if target == nil { - return nil, fmt.Errorf("nil target") + return nil, errors.New("nil target") } if err := validateTarget(target.Value.Type, target.Name, target.Value); err != nil { @@ 
-81,7 +82,7 @@ func getOptionsFromPodResource(target *datadoghqcommon.DatadogPodAutoscalerPodRe func getOptionsFromContainerResource(target *datadoghqcommon.DatadogPodAutoscalerContainerResourceObjective) (*resourceRecommenderSettings, error) { if target == nil { - return nil, fmt.Errorf("nil target") + return nil, errors.New("nil target") } if err := validateTarget(target.Value.Type, target.Name, target.Value); err != nil { @@ -116,10 +117,10 @@ func validateTarget(targetType datadoghqcommon.DatadogPodAutoscalerObjectiveValu func validateUtilizationValue(value datadoghqcommon.DatadogPodAutoscalerObjectiveValue) error { if value.Utilization == nil { - return fmt.Errorf("missing utilization value") + return errors.New("missing utilization value") } if *value.Utilization < 1 || *value.Utilization > 100 { - return fmt.Errorf("utilization value must be between 1 and 100") + return errors.New("utilization value must be between 1 and 100") } return nil } diff --git a/pkg/clusteragent/autoscaling/workload/local/recommendation_settings_test.go b/pkg/clusteragent/autoscaling/workload/local/recommendation_settings_test.go index cd7c9c92ba7909..822fa6baf1b451 100644 --- a/pkg/clusteragent/autoscaling/workload/local/recommendation_settings_test.go +++ b/pkg/clusteragent/autoscaling/workload/local/recommendation_settings_test.go @@ -8,7 +8,7 @@ package local import ( - "fmt" + "errors" "testing" "github.com/google/go-cmp/cmp" @@ -32,7 +32,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { Type: "something-invalid", }, want: nil, - err: fmt.Errorf("Invalid target type: something-invalid"), + err: errors.New("Invalid target type: something-invalid"), }, { name: "Pod resource - CPU target utilization", @@ -81,7 +81,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { PodResource: nil, }, want: nil, - err: fmt.Errorf("nil target"), + err: errors.New("nil target"), }, { name: "Pod resource - invalid name", @@ -96,7 +96,7 @@ func TestNewResourceRecommenderSettings(t 
*testing.T) { }, }, want: nil, - err: fmt.Errorf("invalid resource name: some-resource"), + err: errors.New("invalid resource name: some-resource"), }, { name: "Pod resource - nil utilization", @@ -110,7 +110,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { }, }, want: nil, - err: fmt.Errorf("invalid utilization value: missing utilization value"), + err: errors.New("invalid utilization value: missing utilization value"), }, { name: "Pod resource - out of bounds utilization value", @@ -125,7 +125,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { }, }, want: nil, - err: fmt.Errorf("invalid utilization value: utilization value must be between 1 and 100"), + err: errors.New("invalid utilization value: utilization value must be between 1 and 100"), }, { name: "Container resource - CPU target utilization", @@ -178,7 +178,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { ContainerResource: nil, }, want: nil, - err: fmt.Errorf("nil target"), + err: errors.New("nil target"), }, { name: "Container resource - invalid name", @@ -193,7 +193,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { }, }, want: nil, - err: fmt.Errorf("invalid resource name: some-resource"), + err: errors.New("invalid resource name: some-resource"), }, { name: "Container resource - nil utilization", @@ -208,7 +208,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { }, }, want: nil, - err: fmt.Errorf("invalid utilization value: missing utilization value"), + err: errors.New("invalid utilization value: missing utilization value"), }, { name: "Container resource - out of bounds utilization value", @@ -224,7 +224,7 @@ func TestNewResourceRecommenderSettings(t *testing.T) { }, }, want: nil, - err: fmt.Errorf("invalid utilization value: utilization value must be between 1 and 100"), + err: errors.New("invalid utilization value: utilization value must be between 1 and 100"), }, } diff --git 
a/pkg/clusteragent/autoscaling/workload/local/recommender_testutils.go b/pkg/clusteragent/autoscaling/workload/local/recommender_testutils.go index eea1ec0a4e84a7..27ce5ccca92989 100644 --- a/pkg/clusteragent/autoscaling/workload/local/recommender_testutils.go +++ b/pkg/clusteragent/autoscaling/workload/local/recommender_testutils.go @@ -8,7 +8,6 @@ package local import ( - "fmt" "sync" autoscalingv2 "k8s.io/api/autoscaling/v2" @@ -32,7 +31,7 @@ func newFakeWLMPodEvent(ns, deployment, podName string, containerNames []string) containers := []workloadmeta.OrchestratorContainer{} for _, c := range containerNames { containers = append(containers, workloadmeta.OrchestratorContainer{ - ID: fmt.Sprintf("%s-id", c), + ID: c + "-id", Name: c, Resources: workloadmeta.ContainerResources{ CPURequest: func(f float64) *float64 { return &f }(25), // 250m @@ -50,7 +49,7 @@ func newFakeWLMPodEvent(ns, deployment, podName string, containerNames []string) Name: podName, Namespace: ns, }, - Owners: []workloadmeta.KubernetesPodOwner{{Kind: kubernetes.ReplicaSetKind, Name: fmt.Sprintf("%s-766dbb7846", deployment)}}, + Owners: []workloadmeta.KubernetesPodOwner{{Kind: kubernetes.ReplicaSetKind, Name: deployment + "-766dbb7846"}}, Containers: containers, } diff --git a/pkg/clusteragent/autoscaling/workload/local/replica_calculator.go b/pkg/clusteragent/autoscaling/workload/local/replica_calculator.go index 79b0d18c3fe969..6d195cd3cfa647 100644 --- a/pkg/clusteragent/autoscaling/workload/local/replica_calculator.go +++ b/pkg/clusteragent/autoscaling/workload/local/replica_calculator.go @@ -9,6 +9,7 @@ package local import ( + "errors" "fmt" "math" "time" @@ -166,11 +167,11 @@ func calculateUtilization(recSettings resourceRecommenderSettings, pods []*workl lastValidTimestamp := time.Time{} if len(pods) == 0 { - return utilizationResult{}, fmt.Errorf("No pods found") + return utilizationResult{}, errors.New("No pods found") } if len(queryResult.Results) == 0 { - return utilizationResult{}, 
fmt.Errorf("Issue fetching metrics data") + return utilizationResult{}, errors.New("Issue fetching metrics data") } for _, pod := range pods { @@ -212,7 +213,7 @@ func calculateUtilization(recSettings resourceRecommenderSettings, pods []*workl } if podCount == 0 { - return utilizationResult{}, fmt.Errorf("Issue calculating pod utilization") + return utilizationResult{}, errors.New("Issue calculating pod utilization") } return utilizationResult{ @@ -247,7 +248,7 @@ func getContainerMetrics(queryResult loadstore.QueryResult, podName, containerNa // corresponding timestamp to use to generate a recommendation func processAverageContainerMetricValue(series []loadstore.EntityValue, currentTime time.Time, fallbackStaleDataThreshold int64) (float64, time.Time, error) { if len(series) < 2 { // too little metrics data - return 0.0, time.Time{}, fmt.Errorf("Missing usage metrics") + return 0.0, time.Time{}, errors.New("Missing usage metrics") } values := []loadstore.ValueType{} diff --git a/pkg/clusteragent/autoscaling/workload/local/replica_calculator_test.go b/pkg/clusteragent/autoscaling/workload/local/replica_calculator_test.go index 04a40b7c559b3e..601ef7b27f4dc7 100644 --- a/pkg/clusteragent/autoscaling/workload/local/replica_calculator_test.go +++ b/pkg/clusteragent/autoscaling/workload/local/replica_calculator_test.go @@ -9,7 +9,7 @@ package local import ( "context" - "fmt" + "errors" "testing" "time" @@ -46,7 +46,7 @@ func TestProcessAverageContainerMetricValue(t *testing.T) { series: []loadstore.EntityValue{}, averageMetric: 0.0, lastTimestamp: time.Time{}, - err: fmt.Errorf("Missing usage metrics"), + err: errors.New("Missing usage metrics"), }, { name: "Series with valid values (non-stale)", @@ -104,7 +104,7 @@ func TestCalculateUtilizationPodResource(t *testing.T) { queryResult: loadstore.QueryResult{}, currentTime: time.Time{}, want: utilizationResult{}, - err: fmt.Errorf("No pods found"), + err: errors.New("No pods found"), }, { name: "Pods with empty query 
results", @@ -132,7 +132,7 @@ func TestCalculateUtilizationPodResource(t *testing.T) { queryResult: loadstore.QueryResult{}, currentTime: testTime, want: utilizationResult{}, - err: fmt.Errorf("Issue fetching metrics data"), + err: errors.New("Issue fetching metrics data"), }, { name: "Pods with no corresponding metrics data", @@ -172,7 +172,7 @@ func TestCalculateUtilizationPodResource(t *testing.T) { }, currentTime: testTime, want: utilizationResult{}, - err: fmt.Errorf("Issue calculating pod utilization"), + err: errors.New("Issue calculating pod utilization"), }, { name: "Single pod and container", @@ -471,7 +471,7 @@ func TestCalculateUtilizationContainerResource(t *testing.T) { queryResult: loadstore.QueryResult{}, currentTime: time.Time{}, want: utilizationResult{}, - err: fmt.Errorf("No pods found"), + err: errors.New("No pods found"), }, { name: "Pods with empty query results", @@ -499,7 +499,7 @@ func TestCalculateUtilizationContainerResource(t *testing.T) { queryResult: loadstore.QueryResult{}, currentTime: testTime, want: utilizationResult{}, - err: fmt.Errorf("Issue fetching metrics data"), + err: errors.New("Issue fetching metrics data"), }, { name: "Pods with no corresponding metrics data", @@ -539,7 +539,7 @@ func TestCalculateUtilizationContainerResource(t *testing.T) { }, currentTime: testTime, want: utilizationResult{}, - err: fmt.Errorf("Issue calculating pod utilization"), + err: errors.New("Issue calculating pod utilization"), }, { name: "Single pod and container", @@ -919,7 +919,7 @@ func TestRecommend(t *testing.T) { currentTime: testTime, recommendedReplicas: 0, utilizationRes: utilizationResult{}, - err: fmt.Errorf("Issue fetching metrics data"), + err: errors.New("Issue fetching metrics data"), }, { name: "Pods with no corresponding metrics data", @@ -960,7 +960,7 @@ func TestRecommend(t *testing.T) { currentTime: testTime, recommendedReplicas: 0, utilizationRes: utilizationResult{}, - err: fmt.Errorf("Issue calculating pod utilization"), 
+ err: errors.New("Issue calculating pod utilization"), }, { name: "Scale down expected", diff --git a/pkg/clusteragent/autoscaling/workload/model/pod_autoscaler_test.go b/pkg/clusteragent/autoscaling/workload/model/pod_autoscaler_test.go index 3d2f96671b9ee0..73c72bb8809ba3 100644 --- a/pkg/clusteragent/autoscaling/workload/model/pod_autoscaler_test.go +++ b/pkg/clusteragent/autoscaling/workload/model/pod_autoscaler_test.go @@ -9,7 +9,6 @@ package model import ( "errors" - "fmt" "testing" "time" @@ -374,7 +373,7 @@ func TestParseCustomConfigurationAnnotation(t *testing.T) { CustomRecommenderAnnotationKey: "{\"endpoint: \"localhost:8080/test\",}", }, expected: nil, - err: fmt.Errorf("Failed to parse annotations for custom recommender configuration: invalid character 'l' after object key"), + err: errors.New("Failed to parse annotations for custom recommender configuration: invalid character 'l' after object key"), }, } diff --git a/pkg/clusteragent/autoscaling/workload/pod_watcher.go b/pkg/clusteragent/autoscaling/workload/pod_watcher.go index c51de6bdeb2225..1b39489b539264 100644 --- a/pkg/clusteragent/autoscaling/workload/pod_watcher.go +++ b/pkg/clusteragent/autoscaling/workload/pod_watcher.go @@ -9,7 +9,7 @@ package workload import ( "context" - "fmt" + "errors" "sync" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -21,7 +21,7 @@ const ( patcherQueueSize = 100 ) -var errDeploymentNotValidOwner = fmt.Errorf("deployment is not a valid owner") +var errDeploymentNotValidOwner = errors.New("deployment is not a valid owner") // NamespacedPodOwner represents a pod owner in a namespace type NamespacedPodOwner struct { diff --git a/pkg/clusteragent/autoscaling/workload/provider/provider.go b/pkg/clusteragent/autoscaling/workload/provider/provider.go index aae819a1f00146..4403369818fbc9 100644 --- a/pkg/clusteragent/autoscaling/workload/provider/provider.go +++ b/pkg/clusteragent/autoscaling/workload/provider/provider.go @@ -9,6 +9,7 @@ 
package provider import ( "context" + "errors" "fmt" corev1 "k8s.io/api/core/v1" @@ -41,7 +42,7 @@ func StartWorkloadAutoscaling( senderManager sender.SenderManager, ) (workload.PodPatcher, error) { if apiCl == nil { - return nil, fmt.Errorf("Impossible to start workload autoscaling without valid APIClient") + return nil, errors.New("Impossible to start workload autoscaling without valid APIClient") } eventBroadcaster := record.NewBroadcaster() diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go index c7da3d88d96eb7..7718416cb785e8 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go @@ -9,6 +9,7 @@ package clusterchecks import ( "encoding/json" + "errors" "fmt" "sort" "time" @@ -55,7 +56,7 @@ func (d *dispatcher) calculateAvg() (int, error) { } if length == 0 { - return -1, fmt.Errorf("zero nodes reporting") + return -1, errors.New("zero nodes reporting") } return busyness / length, nil diff --git a/pkg/clusteragent/clusterchecks/handler_api.go b/pkg/clusteragent/clusterchecks/handler_api.go index 03a7abdfff6739..0395e0711fe350 100644 --- a/pkg/clusteragent/clusterchecks/handler_api.go +++ b/pkg/clusteragent/clusterchecks/handler_api.go @@ -9,7 +9,6 @@ package clusterchecks import ( "errors" - "fmt" "net/http" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" @@ -105,7 +104,7 @@ func (h *Handler) GetAllEndpointsCheckConfigs() (types.ConfigResponse, error) { // RebalanceClusterChecks triggers an attempt to rebalance cluster checks func (h *Handler) RebalanceClusterChecks(force bool) ([]types.RebalanceResponse, error) { if !h.dispatcher.advancedDispatching.Load() { - return nil, fmt.Errorf("no checks to rebalance: advanced dispatching is not enabled") + return nil, errors.New("no checks to rebalance: advanced dispatching is not enabled") } rebalancingDecisions := h.dispatcher.rebalance(force) diff 
--git a/pkg/clusteragent/clusterchecks/ksm_sharding.go b/pkg/clusteragent/clusterchecks/ksm_sharding.go index 33b8d60825e50a..3eb2cf2f723611 100644 --- a/pkg/clusteragent/clusterchecks/ksm_sharding.go +++ b/pkg/clusteragent/clusterchecks/ksm_sharding.go @@ -8,6 +8,7 @@ package clusterchecks import ( + "errors" "fmt" "sort" @@ -68,7 +69,7 @@ func (m *ksmShardingManager) analyzeKSMConfig(config integration.Config) ([]reso } if len(instances) == 0 { - return nil, fmt.Errorf("no valid KSM instances found") + return nil, errors.New("no valid KSM instances found") } if len(instances) > 1 { @@ -133,7 +134,7 @@ func (m *ksmShardingManager) analyzeKSMConfig(config integration.Config) ([]reso } if len(groups) == 0 { - return nil, fmt.Errorf("no collectors found after parsing") + return nil, errors.New("no collectors found after parsing") } return groups, nil @@ -190,7 +191,7 @@ func (m *ksmShardingManager) createShardedKSMConfigs( } if len(groups) == 0 { - return nil, fmt.Errorf("no resource groups to shard") + return nil, errors.New("no resource groups to shard") } // Always create shards (pods, nodes, others) regardless of runner count diff --git a/pkg/clusteragent/languagedetection/patcher.go b/pkg/clusteragent/languagedetection/patcher.go index e6fc0f7ec34eb6..fa412cb4f80ea1 100644 --- a/pkg/clusteragent/languagedetection/patcher.go +++ b/pkg/clusteragent/languagedetection/patcher.go @@ -90,11 +90,11 @@ var ( func Start(ctx context.Context, store workloadmeta.Component, logger log.Component, datadogConfig config.Component) error { if patcher != nil { - return fmt.Errorf("can't start language detection patcher twice") + return errors.New("can't start language detection patcher twice") } if store == nil { - return fmt.Errorf("cannot initialize patcher with a nil workloadmeta store") + return errors.New("cannot initialize patcher with a nil workloadmeta store") } apiCl, err := apiserver.GetAPIClient() diff --git a/pkg/collector/corechecks/cluster/helm/helm.go 
b/pkg/collector/corechecks/cluster/helm/helm.go index 56c4e3bc3c5275..000995adb70197 100644 --- a/pkg/collector/corechecks/cluster/helm/helm.go +++ b/pkg/collector/corechecks/cluster/helm/helm.go @@ -238,14 +238,14 @@ func (hc *HelmCheck) tagsForMetricsAndEvents(release *release, includeRevision b helmChartTag := fmt.Sprintf("helm_chart:%s-%s", release.Chart.Metadata.Name, escapedVersion) tags = append( tags, - fmt.Sprintf("helm_chart_version:%s", release.Chart.Metadata.Version), - fmt.Sprintf("helm_app_version:%s", release.Chart.Metadata.AppVersion), + "helm_chart_version:"+release.Chart.Metadata.Version, + "helm_app_version:"+release.Chart.Metadata.AppVersion, helmChartTag, ) } if release.Info != nil { - tags = append(tags, fmt.Sprintf("helm_status:%s", release.Info.Status)) + tags = append(tags, "helm_status:"+release.Info.Status) } for helmValue, tagName := range hc.instance.HelmValuesAsTags { @@ -265,19 +265,19 @@ func (hc *HelmCheck) tagsForMetricsAndEvents(release *release, includeRevision b // revisions func commonTags(release *release, storageDriver helmStorage) []string { tags := []string{ - fmt.Sprintf("helm_release:%s", release.Name), + "helm_release:" + release.Name, fmt.Sprintf("helm_storage:%s", storageDriver), - fmt.Sprintf("kube_namespace:%s", release.Namespace), + "kube_namespace:" + release.Namespace, // "helm_namespace" is just an alias for "kube_namespace". // "kube_namespace" is a better name and consistent with the rest of // checks, but in the first release of the check we had "helm_namespace" // so we need to keep it for backwards-compatibility. 
- fmt.Sprintf("helm_namespace:%s", release.Namespace), + "helm_namespace:" + release.Namespace, } if release.Chart != nil && release.Chart.Metadata != nil { - tags = append(tags, fmt.Sprintf("helm_chart_name:%s", release.Chart.Metadata.Name)) + tags = append(tags, "helm_chart_name:"+release.Chart.Metadata.Name) } return tags diff --git a/pkg/collector/corechecks/cluster/helm/map_utils.go b/pkg/collector/corechecks/cluster/helm/map_utils.go index fbb13e74d56e70..745de084e1514c 100644 --- a/pkg/collector/corechecks/cluster/helm/map_utils.go +++ b/pkg/collector/corechecks/cluster/helm/map_utils.go @@ -8,6 +8,7 @@ package helm import ( + "errors" "fmt" "reflect" "strings" @@ -33,7 +34,7 @@ import ( // "agents.image" is not. func getValue(m map[string]interface{}, dotSeparatedKey string) (string, error) { if dotSeparatedKey == "" { - return "", fmt.Errorf("not found") + return "", errors.New("not found") } keys := strings.Split(dotSeparatedKey, ".") @@ -41,7 +42,7 @@ func getValue(m map[string]interface{}, dotSeparatedKey string) (string, error) for _, key := range keys { if obj == nil || reflect.TypeOf(obj).Kind() != reflect.Map { - return "", fmt.Errorf("not found") + return "", errors.New("not found") } mapValue := reflect.ValueOf(obj) @@ -49,14 +50,14 @@ func getValue(m map[string]interface{}, dotSeparatedKey string) (string, error) objValue := mapValue.MapIndex(reflect.ValueOf(key)) if !objValue.IsValid() { - return "", fmt.Errorf("not found") + return "", errors.New("not found") } obj = objValue.Interface() } if obj == nil || reflect.TypeOf(obj).Kind() == reflect.Map { - return "", fmt.Errorf("not found") + return "", errors.New("not found") } return fmt.Sprintf("%v", reflect.ValueOf(obj)), nil diff --git a/pkg/collector/corechecks/cluster/helm/release.go b/pkg/collector/corechecks/cluster/helm/release.go index bc9e62bdc59b30..0131491ac2a152 100644 --- a/pkg/collector/corechecks/cluster/helm/release.go +++ b/pkg/collector/corechecks/cluster/helm/release.go @@ 
-12,8 +12,10 @@ import ( "compress/gzip" "encoding/base64" "encoding/json" + "errors" "fmt" "io" + "strconv" ) // The "release" struct and the related ones, are a simplified version of the @@ -81,7 +83,7 @@ func decodeRelease(data string) (*release, error) { } if len(b) < 4 { // Avoid panic if b[0:3] cannot be accessed - return nil, fmt.Errorf("The byte array is too short (expected at least 4 characters, got %s instead): it cannot contain a Helm release", fmt.Sprint(len(b))) + return nil, fmt.Errorf("The byte array is too short (expected at least 4 characters, got %s instead): it cannot contain a Helm release", strconv.Itoa(len(b))) } // For backwards compatibility with releases that were stored before // compression was introduced we skip decompression if the @@ -116,12 +118,12 @@ func (rel *release) getConfigValue(dotSeparatedKey string) (string, error) { value, err := getValue(rel.Config, dotSeparatedKey) if err != nil { if rel.Chart == nil { - return "", fmt.Errorf("not found") + return "", errors.New("not found") } value, err = getValue(rel.Chart.Values, dotSeparatedKey) if err != nil { - return "", fmt.Errorf("not found") + return "", errors.New("not found") } } diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go index c0a15f6751f4bf..2f210b5f5edbf6 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go @@ -245,20 +245,20 @@ func getInvolvedObjectTags(involvedObject v1.ObjectReference, taggerInstance tag // non-namespaced ones, or kubernetes_*. The latter two are now // considered deprecated. 
tagList := []string{ - fmt.Sprintf("kube_kind:%s", involvedObject.Kind), - fmt.Sprintf("kube_name:%s", involvedObject.Name), + "kube_kind:" + involvedObject.Kind, + "kube_name:" + involvedObject.Name, // DEPRECATED: - fmt.Sprintf("kubernetes_kind:%s", involvedObject.Kind), - fmt.Sprintf("name:%s", involvedObject.Name), + "kubernetes_kind:" + involvedObject.Kind, + "name:" + involvedObject.Name, } if involvedObject.Namespace != "" { tagList = append(tagList, - fmt.Sprintf("kube_namespace:%s", involvedObject.Namespace), + "kube_namespace:"+involvedObject.Namespace, // DEPRECATED: - fmt.Sprintf("namespace:%s", involvedObject.Namespace), + "namespace:"+involvedObject.Namespace, ) namespaceEntityID := types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "namespaces", "", involvedObject.Namespace))) diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go index 5424c3b9d7a6e6..0a8f5496fd7389 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go @@ -8,7 +8,6 @@ package kubernetesapiserver import ( - "fmt" "reflect" "testing" @@ -187,7 +186,7 @@ func Test_getInvolvedObjectTags(t *testing.T) { } func Test_getEventHostInfoImpl(t *testing.T) { - providerIDFunc := func(clusterName string) string { return fmt.Sprintf("foo-%s", clusterName) } + providerIDFunc := func(clusterName string) string { return "foo-" + clusterName } type args struct { clusterName string diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go index ca360302706c48..395d2f268be3d0 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go +++ 
b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go @@ -12,7 +12,6 @@ package kubernetesapiserver import ( "context" "errors" - "fmt" "strings" "time" @@ -373,7 +372,7 @@ func (k *KubeASCheck) parseComponentStatus(sender sender.Sender, componentsStatu } } - tags := []string{fmt.Sprintf("component:%s", component.Name)} + tags := []string{"component:" + component.Name} sender.ServiceCheck(KubeControlPaneCheck, statusCheck, "", tags, message) } } @@ -419,7 +418,7 @@ func convertFilters(conf []string) string { for _, filter := range conf { f := strings.Split(filter, "=") if len(f) == 1 { - formatedFilters = append(formatedFilters, fmt.Sprintf("reason!=%s", f[0])) + formatedFilters = append(formatedFilters, "reason!="+f[0]) continue } formatedFilters = append(formatedFilters, filter) diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go index e574ee5601ff5e..8a68d8348b3557 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle.go @@ -74,18 +74,18 @@ func (b *kubernetesEventBundle) formatEvents(taggerInstance tagger.Component) (e readableKey := buildReadableKey(b.involvedObject) tags := getInvolvedObjectTags(b.involvedObject, taggerInstance) - tags = append(tags, fmt.Sprintf("source_component:%s", b.component)) + tags = append(tags, "source_component:"+b.component) tags = append(tags, "orchestrator:kubernetes") - tags = append(tags, fmt.Sprintf("reporting_controller:%s", b.reportingController)) + tags = append(tags, "reporting_controller:"+b.reportingController) if b.hostInfo.providerID != "" { - tags = append(tags, fmt.Sprintf("host_provider_id:%s", b.hostInfo.providerID)) + tags = append(tags, "host_provider_id:"+b.hostInfo.providerID) } // If hostname was not defined, the aggregator will then set the local 
hostname output := event.Event{ - Title: fmt.Sprintf("Events from the %s", readableKey), + Title: "Events from the " + readableKey, Priority: event.PriorityNormal, Host: b.hostInfo.hostname, SourceTypeName: getEventSource(b.reportingController, b.component), diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go index 5236329bd2c2a9..bca4524f6a5dd5 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_eventbundle_test.go @@ -83,11 +83,11 @@ func TestFormatEvent(t *testing.T) { "source_component:default-scheduler", "orchestrator:kubernetes", "reporting_controller:default-scheduler", - fmt.Sprintf("kube_name:%s", podName), - fmt.Sprintf("name:%s", podName), - fmt.Sprintf("pod_name:%s", podName), + "kube_name:" + podName, + "name:" + podName, + "pod_name:" + podName, }, - AggregationKey: fmt.Sprintf("kubernetes_apiserver:%s", objUID), + AggregationKey: "kubernetes_apiserver:" + objUID, Text: "%%% \n" + fmt.Sprintf( "%s \n _Events emitted by the %s seen at %s since %s_ \n", "2 **Scheduled**: Successfully assigned dca-789976f5d7-2ljx6 to ip-10-0-0-54\n "+ @@ -118,11 +118,11 @@ func TestFormatEvent(t *testing.T) { "source_component:default-scheduler", "orchestrator:kubernetes", "reporting_controller:default-scheduler", - fmt.Sprintf("kube_name:%s", podName), - fmt.Sprintf("name:%s", podName), - fmt.Sprintf("pod_name:%s", podName), + "kube_name:" + podName, + "name:" + podName, + "pod_name:" + podName, }, - AggregationKey: fmt.Sprintf("kubernetes_apiserver:%s", objUID), + AggregationKey: "kubernetes_apiserver:" + objUID, Text: "%%% \n" + fmt.Sprintf( "%s \n _Events emitted by the %s seen at %s since %s_ \n", "1 **Failed**: Error: error response: filepath: \\~file\\~\n", @@ -156,11 +156,11 @@ func TestFormatEvent(t *testing.T) { 
"host_provider_id:test-host-provider-id", "orchestrator:kubernetes", "reporting_controller:default-scheduler", - fmt.Sprintf("kube_name:%s", podName), - fmt.Sprintf("name:%s", podName), - fmt.Sprintf("pod_name:%s", podName), + "kube_name:" + podName, + "name:" + podName, + "pod_name:" + podName, }, - AggregationKey: fmt.Sprintf("kubernetes_apiserver:%s", objUID), + AggregationKey: "kubernetes_apiserver:" + objUID, Text: "%%% \n" + fmt.Sprintf( "%s \n _Events emitted by the %s seen at %s since %s_ \n", "2 **Scheduled**: Successfully assigned dca-789976f5d7-2ljx6 to ip-10-0-0-54\n "+ diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift.go index bd002ae380b4ab..284556bc7fe6be 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift.go @@ -49,7 +49,7 @@ func (k *KubeASCheck) retrieveOShiftClusterQuotas() ([]osq.ClusterResourceQuota, // reportClusterQuotas reports metrics on OpenShift ClusterResourceQuota objects func (k *KubeASCheck) reportClusterQuotas(quotas []osq.ClusterResourceQuota, sender sender.Sender) { for _, quota := range quotas { - quotaTags := []string{fmt.Sprintf("clusterquota:%s", quota.Name)} + quotaTags := []string{"clusterquota:" + quota.Name} remaining := computeQuotaRemaining(quota.Status.Total.Used, quota.Status.Total.Hard) k.reportQuota(quota.Status.Total.Hard, "openshift.clusterquota", "limit", quotaTags, sender) @@ -57,7 +57,7 @@ func (k *KubeASCheck) reportClusterQuotas(quotas []osq.ClusterResourceQuota, sen k.reportQuota(remaining, "openshift.clusterquota", "remaining", quotaTags, sender) for _, nsQuota := range quota.Status.Namespaces { - nsTags := append(quotaTags, fmt.Sprintf("kube_namespace:%s", nsQuota.Namespace)) + nsTags := append(quotaTags, "kube_namespace:"+nsQuota.Namespace) k.reportQuota(nsQuota.Status.Hard, 
"openshift.appliedclusterquota", "limit", nsTags, sender) k.reportQuota(nsQuota.Status.Used, "openshift.appliedclusterquota", "used", nsTags, sender) k.reportQuota(remaining, "openshift.appliedclusterquota", "remaining", nsTags, sender) diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go index 71b384f6270b56..fbb5d76222b91a 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/unbundled_events.go @@ -136,15 +136,15 @@ func (c *unbundledTransformer) buildEventTags(ev *v1.Event, involvedObject v1.Ob // Hardcoded tags tagsAccumulator.Append( - fmt.Sprintf("source_component:%s", ev.Source.Component), + "source_component:"+ev.Source.Component, "orchestrator:kubernetes", - fmt.Sprintf("reporting_controller:%s", ev.ReportingController), - fmt.Sprintf("event_reason:%s", ev.Reason), + "reporting_controller:"+ev.ReportingController, + "event_reason:"+ev.Reason, ) // Specific providerID tag if hostInfo.providerID != "" { - tagsAccumulator.Append(fmt.Sprintf("host_provider_id:%s", hostInfo.providerID)) + tagsAccumulator.Append("host_provider_id:" + hostInfo.providerID) } // Tags from the involved object, including tags from object namespace diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go index b0c8cc24007002..54ca046ac68036 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go @@ -65,7 +65,7 @@ func (cm CollectorMetadata) CollectorTags() []string { if cm.Version == "" { return nil } - return []string{fmt.Sprintf("kube_api_version:%s", cm.Version)} + return []string{"kube_api_version:" + cm.Version} } // FullName returns a string that contains the collector name and 
version. diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go index 15e2091c7deaed..b6e98ab9b4de7e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go @@ -9,7 +9,7 @@ package ecs import ( - "fmt" + "errors" "github.com/benbjohnson/clock" @@ -89,7 +89,7 @@ func (t *TaskCollector) Process(rcfg *collectors.CollectorRunConfig, list interf processResult, listed, processed := t.processor.Process(ctx, list) if processed == -1 { - return nil, fmt.Errorf("unable to process resources: a panic occurred") + return nil, errors.New("unable to process resources: a panic occurred") } result := &collectors.CollectorRunResult{ diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/errors.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/errors.go index bdcf544237aa66..96d310a065f573 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/errors.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/errors.go @@ -8,15 +8,13 @@ package collectors import ( - "fmt" - "github.com/pkg/errors" ) var ( // ErrProcessingPanic is the error raised when a panic was caught on resource // processing. - ErrProcessingPanic = fmt.Errorf("unable to process resources: a panic occurred") + ErrProcessingPanic = errors.New("unable to process resources: a panic occurred") ) // NewListingError creates an error that wraps the cause of a listing failure. 
diff --git a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go index 5ae2030c457d7c..0e183a144d8930 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go +++ b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery.go @@ -10,6 +10,7 @@ package discovery import ( "context" + "errors" "fmt" "time" @@ -50,7 +51,7 @@ func (p *APIServerDiscoveryProvider) Discover(inventory *inventory.CollectorInve } if len(resources) == 0 { - return nil, fmt.Errorf("failed to discover resources from API groups") + return nil, errors.New("failed to discover resources from API groups") } preferredResources, otherResources := identifyResources(groups, resources) diff --git a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_crd.go b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_crd.go index 00e80c794b5adc..3f12be585a13e6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_crd.go +++ b/pkg/collector/corechecks/cluster/orchestrator/discovery/collector_discovery_crd.go @@ -8,6 +8,7 @@ package discovery import ( + "errors" "fmt" "strings" @@ -47,7 +48,7 @@ func NewDiscoveryCollectorForInventory() *DiscoveryCollector { } err := dc.fillCache() if err != nil { - log.Errorc(fmt.Sprintf("Fail to init discovery collector : %s", err.Error()), orchestrator.ExtraLogContext...) + log.Errorc("Fail to init discovery collector : "+err.Error(), orchestrator.ExtraLogContext...) 
} return dc } @@ -62,7 +63,7 @@ func (d *DiscoveryCollector) fillCache() error { } if len(d.cache.Resources) == 0 { - return fmt.Errorf("failed to discover resources from API groups") + return errors.New("failed to discover resources from API groups") } for _, list := range d.cache.Resources { for _, resource := range list.APIResources { diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cluster.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cluster.go index dfe0376d90755b..ea77ec00b1fcd5 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cluster.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cluster.go @@ -12,6 +12,7 @@ import ( "context" "encoding/json" "fmt" + "strconv" model "github.com/DataDog/agent-payload/v5/process" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" @@ -216,7 +217,7 @@ func fillClusterResourceVersion(c *model.Cluster) error { } version := murmur3.Sum64(jsonClustermodel) - c.ResourceVersion = fmt.Sprint(version) + c.ResourceVersion = strconv.FormatUint(version, 10) return nil } diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go index e3712ac41bdc58..69efb175c6f8b1 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go @@ -93,7 +93,7 @@ func (h *PodHandlers) BeforeCacheCheck(ctx processors.ProcessorContext, resource m.Tags = append(m.Tags, taggerTags...) 
// additional tags - m.Tags = append(m.Tags, fmt.Sprintf("pod_status:%s", strings.ToLower(m.Status))) + m.Tags = append(m.Tags, "pod_status:"+strings.ToLower(m.Status)) // tags that should be on the tagger if len(taggerTags) == 0 { @@ -108,7 +108,7 @@ func (h *PodHandlers) BeforeCacheCheck(ctx processors.ProcessorContext, resource // Custom resource version to work around kubelet issues. if err := k8sTransformers.FillK8sPodResourceVersion(m); err != nil { - log.Warnc(fmt.Sprintf("Failed to compute pod resource version: %s", err.Error()), orchestrator.ExtraLogContext...) + log.Warnc("Failed to compute pod resource version: "+err.Error(), orchestrator.ExtraLogContext...) skip = true return } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go index e5629a33f4f25f..824ed284d85e85 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/ecs/task.go @@ -227,14 +227,14 @@ func toTags(tags map[string]string) []string { func BuildTaskResourceVersion(model interface{}) string { modelJSON, err := json.Marshal(model) if err != nil { - log.Warnc(fmt.Sprintf("Fail to compute ECS task resource version: %s", err.Error()), orchestrator.ExtraLogContext...) + log.Warnc("Fail to compute ECS task resource version: "+err.Error(), orchestrator.ExtraLogContext...) return "" } h := fnv.New64a() _, err = h.Write(modelJSON) if err != nil { - log.Warnc(fmt.Sprintf("Fail to compute ECS task resource version: %s", err.Error()), orchestrator.ExtraLogContext...) + log.Warnc("Fail to compute ECS task resource version: "+err.Error(), orchestrator.ExtraLogContext...) 
return "" } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go index 08da9ab0d35a33..0618e248a46371 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go @@ -9,9 +9,10 @@ package k8s import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "strings" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" + model "github.com/DataDog/agent-payload/v5/process" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" @@ -148,7 +149,7 @@ func convertNodeStatusToTags(nodeStatus string) []string { tags = append(tags, "node_schedulable:false") continue } - tags = append(tags, fmt.Sprintf("node_status:%s", strings.ToLower(status))) + tags = append(tags, "node_status:"+strings.ToLower(status)) } if !unschedulable { tags = append(tags, "node_schedulable:true") diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go index 67e43b2c82c660..7ffcf9f641677f 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go @@ -211,7 +211,7 @@ func FillK8sPodResourceVersion(p *model.Pod) error { // Replace the payload metadata field with the custom version. 
version := murmur3.Sum64(jsonPodModel) - p.Metadata.ResourceVersion = fmt.Sprint(version) + p.Metadata.ResourceVersion = strconv.FormatUint(version, 10) return nil } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go index 00593e77a96ef6..6421afe35fdc65 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go @@ -8,7 +8,6 @@ package k8s import ( - "fmt" "testing" "time" @@ -261,8 +260,8 @@ func TestExtractPodDisruptionBudget(t *testing.T) { CreationTimestamp: 0, DeletionTimestamp: 0, Labels: []string{ - fmt.Sprintf("%s:ultimate", kubernetes.VersionTagLabelKey), - fmt.Sprintf("%s:honorable", kubernetes.ServiceTagLabelKey), + kubernetes.VersionTagLabelKey + ":ultimate", + kubernetes.ServiceTagLabelKey + ":honorable", "app:my-app", }, Annotations: []string{"annotation:my-annotation"}, diff --git a/pkg/collector/corechecks/containers/containerd/check.go b/pkg/collector/corechecks/containers/containerd/check.go index fc819dce4cbef9..b65cad08f2d141 100644 --- a/pkg/collector/corechecks/containers/containerd/check.go +++ b/pkg/collector/corechecks/containers/containerd/check.go @@ -185,7 +185,7 @@ func (c *ContainerdCheck) scrapeOpenmetricsEndpoint(sender sender.Sender) error return nil } - openmetricsEndpoint := fmt.Sprintf("%s/v1/metrics", c.instance.OpenmetricsEndpoint) + openmetricsEndpoint := c.instance.OpenmetricsEndpoint + "/v1/metrics" resp, err := c.httpClient.Get(openmetricsEndpoint) if err != nil { return err diff --git a/pkg/collector/corechecks/containers/containerd/containerd_transformers.go b/pkg/collector/corechecks/containers/containerd/containerd_transformers.go index d6bd8c94b5b966..baa27766bda1fd 100644 --- 
a/pkg/collector/corechecks/containers/containerd/containerd_transformers.go +++ b/pkg/collector/corechecks/containers/containerd/containerd_transformers.go @@ -46,7 +46,7 @@ func imagePullMetricTransformer(s sender.Sender, _ string, sample model.Sample) metricTags := []string{ fmt.Sprintf("grpc_service:%s", metric["grpc_service"]), - fmt.Sprintf("grpc_code:%s", toSnakeCase(string(grpcCode))), + "grpc_code:" + toSnakeCase(string(grpcCode)), } s.MonotonicCount("containerd.image.pull", float64(sample.Value), "", metricTags) diff --git a/pkg/collector/corechecks/containers/containerd/events.go b/pkg/collector/corechecks/containers/containerd/events.go index 3d9a041a3e672b..30e4aa528f9ce0 100644 --- a/pkg/collector/corechecks/containers/containerd/events.go +++ b/pkg/collector/corechecks/containers/containerd/events.go @@ -9,6 +9,7 @@ package containerd import ( "context" + "errors" "fmt" "strings" "sync" @@ -62,7 +63,7 @@ func (c *ContainerdCheck) computeEvents(events []containerdEvent, sender sender. eventType := getEventType(e.Topic) if eventType != "" { - tags = append(tags, fmt.Sprintf("event_type:%s", eventType)) + tags = append(tags, "event_type:"+eventType) } if split[2] == "oom" { @@ -76,7 +77,7 @@ func (c *ContainerdCheck) computeEvents(events []containerdEvent, sender sender. 
SourceTypeName: CheckName, EventType: CheckName, AlertType: alertType, - AggregationKey: fmt.Sprintf("containerd:%s", e.Topic), + AggregationKey: "containerd:" + e.Topic, Text: e.Message, Ts: e.Timestamp.Unix(), Tags: tags, @@ -179,7 +180,7 @@ func (s *subscriber) run(ctx context.Context) error { s.Lock() if s.running { s.Unlock() - return fmt.Errorf("subscriber is already running the event listener routine") + return errors.New("subscriber is already running the event listener routine") } excludePauseContainers := pkgconfigsetup.Datadog().GetBool("exclude_pause_container") @@ -405,7 +406,7 @@ func (s *subscriber) run(ctx context.Context) error { return nil } log.Errorf("Error while streaming logs from containerd: %s", e.Error()) - return fmt.Errorf("stopping Containerd event listener routine") + return errors.New("stopping Containerd event listener routine") } } } diff --git a/pkg/collector/corechecks/containers/containerd/events_test.go b/pkg/collector/corechecks/containers/containerd/events_test.go index 5816d3556a49c9..066c9cb0af1906 100644 --- a/pkg/collector/corechecks/containers/containerd/events_test.go +++ b/pkg/collector/corechecks/containers/containerd/events_test.go @@ -9,7 +9,7 @@ package containerd import ( "context" - "fmt" + "errors" "testing" "time" @@ -102,7 +102,7 @@ func TestCheckEvents(t *testing.T) { ev := sub.Flush(time.Now().Unix()) assert.Len(t, ev, 1) assert.Equal(t, ev[0].Topic, "/tasks/paused") - errorsCh <- fmt.Errorf("chan breaker") + errorsCh <- errors.New("chan breaker") require.Eventually(t, func() bool { return !sub.isRunning() }, testTimeout, testTicker) @@ -211,7 +211,7 @@ func TestCheckEvents_PauseContainers(t *testing.T) { Image: "nginx:latest", }, nil } - return containers.Container{}, fmt.Errorf("container not found") + return containers.Container{}, errors.New("container not found") }, MockIsSandbox: func(namespace string, ctn containerd.Container) (bool, error) { return namespace == testNamespace && (ctn.ID() == 
existingPauseContainerID || ctn.ID() == newPauseContainerID), nil @@ -284,7 +284,7 @@ func TestCheckEvents_PauseContainers(t *testing.T) { cha <- &eventContainerDelete // Stop the subscriber before checking events to avoid race condition - errorsCh <- fmt.Errorf("stop subscriber") + errorsCh <- errors.New("stop subscriber") assert.Eventually(t, func() bool { return !sub.isRunning() }, testTimeout, testTicker) flushed := sub.Flush(time.Now().Unix()) diff --git a/pkg/collector/corechecks/containers/containerd/utils.go b/pkg/collector/corechecks/containers/containerd/utils.go index ff925bbfcaa25d..e3d1d283e7b025 100644 --- a/pkg/collector/corechecks/containers/containerd/utils.go +++ b/pkg/collector/corechecks/containers/containerd/utils.go @@ -8,8 +8,6 @@ package containerd import ( - "fmt" - workloadfilter "github.com/DataDog/datadog-agent/comp/core/workloadfilter/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic" @@ -29,13 +27,13 @@ func getProcessorFilter(containerFilter workloadfilter.FilterBundle, store workl func getImageTags(imageName string) []string { long, _, short, tag, err := pkgcontainersimage.SplitImageName(imageName) if err != nil { - return []string{fmt.Sprintf("image:%s", imageName)} + return []string{"image:" + imageName} } return []string{ - fmt.Sprintf("image:%s", imageName), - fmt.Sprintf("image_name:%s", long), - fmt.Sprintf("image_tag:%s", tag), - fmt.Sprintf("short_image:%s", short), + "image:" + imageName, + "image_name:" + long, + "image_tag:" + tag, + "short_image:" + short, } } diff --git a/pkg/collector/corechecks/containers/docker/eventbundle.go b/pkg/collector/corechecks/containers/docker/eventbundle.go index b981ad269831c4..6c85b1b78fd88a 100644 --- a/pkg/collector/corechecks/containers/docker/eventbundle.go +++ b/pkg/collector/corechecks/containers/docker/eventbundle.go @@ -80,7 +80,7 @@ func (b *dockerEventBundle) 
toDatadogEvent(hostname string) (event.Event, error) EventType: CheckName, AlertType: b.alertType, Ts: b.maxTimestamp.Unix(), - AggregationKey: fmt.Sprintf("docker:%s", b.imageName), + AggregationKey: "docker:" + b.imageName, } seenContainers := make(map[string]bool) diff --git a/pkg/collector/corechecks/containers/docker/unbundled_events.go b/pkg/collector/corechecks/containers/docker/unbundled_events.go index 0a9f9a264d63dd..4ec077793b227e 100644 --- a/pkg/collector/corechecks/containers/docker/unbundled_events.go +++ b/pkg/collector/corechecks/containers/docker/unbundled_events.go @@ -78,7 +78,7 @@ func (t *unbundledTransformer) Transform(events []*docker.ContainerEvent) ([]eve EventType: CheckName, AlertType: alertType, Ts: ev.Timestamp.Unix(), - AggregationKey: fmt.Sprintf("docker:%s", ev.ContainerID), + AggregationKey: "docker:" + ev.ContainerID, }) } diff --git a/pkg/collector/corechecks/containers/docker/unbundled_events_test.go b/pkg/collector/corechecks/containers/docker/unbundled_events_test.go index 05d51889298aa2..3da46e119b543a 100644 --- a/pkg/collector/corechecks/containers/docker/unbundled_events_test.go +++ b/pkg/collector/corechecks/containers/docker/unbundled_events_test.go @@ -134,7 +134,7 @@ func TestUnbundledEventsTransform(t *testing.T) { fakeTagger.SetTags( types.NewEntityID(types.ContainerID, ev.ContainerID), "docker", - []string{fmt.Sprintf("image_name:%s", ev.ImageName), fmt.Sprintf("container_name:%s", ev.ContainerName)}, + []string{"image_name:" + ev.ImageName, "container_name:" + ev.ContainerName}, []string{}, []string{}, []string{}, diff --git a/pkg/collector/corechecks/containers/docker/utils.go b/pkg/collector/corechecks/containers/docker/utils.go index 5164b154b0c12a..80a33ff7b4d52c 100644 --- a/pkg/collector/corechecks/containers/docker/utils.go +++ b/pkg/collector/corechecks/containers/docker/utils.go @@ -8,8 +8,6 @@ package docker import ( - "fmt" - "github.com/docker/docker/api/types/events" workloadfilter 
"github.com/DataDog/datadog-agent/comp/core/workloadfilter/def" @@ -35,10 +33,10 @@ func getImageTags(imageName string) ([]string, error) { } return []string{ - fmt.Sprintf("docker_image:%s", imageName), - fmt.Sprintf("image_name:%s", long), - fmt.Sprintf("image_tag:%s", tag), - fmt.Sprintf("short_image:%s", short), + "docker_image:" + imageName, + "image_name:" + long, + "image_tag:" + tag, + "short_image:" + short, }, nil } diff --git a/pkg/collector/corechecks/containers/generic/processor_test.go b/pkg/collector/corechecks/containers/generic/processor_test.go index 5f52df68dc1461..c64799835d3b76 100644 --- a/pkg/collector/corechecks/containers/generic/processor_test.go +++ b/pkg/collector/corechecks/containers/generic/processor_test.go @@ -6,7 +6,7 @@ package generic import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -180,7 +180,7 @@ func TestProcessorRunPartialStats(t *testing.T) { containersStats := map[string]mock.ContainerEntry{ "cID202": { - Error: fmt.Errorf("Unable to read some stuff"), + Error: errors.New("Unable to read some stuff"), }, } diff --git a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go index 413eba16f9e17a..5a7fb51032d2c0 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/cadvisor/provider.go @@ -419,7 +419,7 @@ func (p *Provider) getContainerName(labels model.Metric) string { func (p *Provider) getKubeContainerNameTag(labels model.Metric) string { containerName := p.getContainerName(labels) if containerName != "" { - return fmt.Sprintf("kube_container_name:%s", containerName) + return "kube_container_name:" + containerName } return "" } diff --git a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go index 
784e47538cfe66..cc52b2e32c3a0d 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/kubelet/provider_test.go @@ -8,7 +8,7 @@ package kubelet import ( - "fmt" + "errors" "strconv" "testing" @@ -412,7 +412,7 @@ func (suite *ProviderTestSuite) TestSendAlwaysCounterSubsequentRuns() { func (suite *ProviderTestSuite) TestFirstRunRemainsOnError() { // Create a mock that returns an error - testError := fmt.Errorf("kubelet connection failed") + testError := errors.New("kubelet connection failed") errorResponse := commontesting.NewEndpointResponse( "", 500, testError) diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go index 7388fd839d13a4..1450a997f798a8 100644 --- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go @@ -1061,7 +1061,7 @@ func hashMapNumberOfEntriesWithBatch(mp *ebpf.Map, buffers *entryCountBuffers, m // We got a batch and it's the first one, and we didn't reach the end of the map, so we need to store the keys we got here // so that later on we can check against them to see if we got an iteration restart if enoughSpaceForFullBatch { // A sanity check - return -1, fmt.Errorf("Unexpected batch lookup result: we should have enough space to get the full map in one batch, but BatchLookup returned a partial result") + return -1, errors.New("Unexpected batch lookup result: we should have enough space to get the full map in one batch, but BatchLookup returned a partial result") } // Keep track the keys of the first batch so we can look them up later to see if we got restarted diff --git a/pkg/collector/corechecks/embed/apm/apm.go b/pkg/collector/corechecks/embed/apm/apm.go index dd72b8eec28a9c..ad8298dd1eaac3 100644 --- a/pkg/collector/corechecks/embed/apm/apm.go +++ b/pkg/collector/corechecks/embed/apm/apm.go @@ -11,7 +11,6 @@ 
package apm import ( "bufio" "context" - "fmt" "os" "os/exec" "time" @@ -103,10 +102,10 @@ func (c *APMCheck) run() error { hname, _ := hostname.Get(context.TODO()) env := os.Environ() - env = append(env, fmt.Sprintf("DD_API_KEY=%s", utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString("api_key")))) - env = append(env, fmt.Sprintf("DD_HOSTNAME=%s", hname)) - env = append(env, fmt.Sprintf("DD_DOGSTATSD_PORT=%s", pkgconfigsetup.Datadog().GetString("dogstatsd_port"))) - env = append(env, fmt.Sprintf("DD_LOG_LEVEL=%s", pkgconfigsetup.Datadog().GetString("log_level"))) + env = append(env, "DD_API_KEY="+utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString("api_key"))) + env = append(env, "DD_HOSTNAME="+hname) + env = append(env, "DD_DOGSTATSD_PORT="+pkgconfigsetup.Datadog().GetString("dogstatsd_port")) + env = append(env, "DD_LOG_LEVEL="+pkgconfigsetup.Datadog().GetString("log_level")) cmd.Env = env // forward the standard output to the Agent logger @@ -189,7 +188,7 @@ func (c *APMCheck) Configure(_ sender.SenderManager, _ uint64, data integration. 
// explicitly provide to the trace-agent the agent configuration file if _, err := os.Stat(configFile); !os.IsNotExist(err) { - c.commandOpts = append(c.commandOpts, fmt.Sprintf("-config=%s", configFile)) + c.commandOpts = append(c.commandOpts, "-config="+configFile) } c.source = source diff --git a/pkg/collector/corechecks/embed/process/process_agent.go b/pkg/collector/corechecks/embed/process/process_agent.go index f3632766eff418..43a9fe993188f3 100644 --- a/pkg/collector/corechecks/embed/process/process_agent.go +++ b/pkg/collector/corechecks/embed/process/process_agent.go @@ -195,7 +195,7 @@ func (c *ProcessAgentCheck) Configure(senderManager sender.SenderManager, _ uint configFile := pkgconfigsetup.Datadog().ConfigFileUsed() c.commandOpts = []string{} if _, err := os.Stat(configFile); !os.IsNotExist(err) { - c.commandOpts = append(c.commandOpts, fmt.Sprintf("-config=%s", configFile)) + c.commandOpts = append(c.commandOpts, "-config="+configFile) } c.source = source diff --git a/pkg/collector/corechecks/gpu/gpu.go b/pkg/collector/corechecks/gpu/gpu.go index f0414c5340650a..46d2a18902c49e 100644 --- a/pkg/collector/corechecks/gpu/gpu.go +++ b/pkg/collector/corechecks/gpu/gpu.go @@ -9,6 +9,7 @@ package gpu import ( + "errors" "fmt" "time" @@ -110,7 +111,7 @@ func newCheckTelemetryMetrics(tm telemetry.Component) *checkTelemetryMetrics { func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, config, initConfig integration.Data, source string) error { // Check if GPU check is enabled (follows SBOM pattern) if !pkgconfigsetup.Datadog().GetBool("gpu.enabled") { - return fmt.Errorf("GPU check is disabled") + return errors.New("GPU check is disabled") } if err := c.CommonConfigure(senderManager, initConfig, config, source); err != nil { diff --git a/pkg/collector/corechecks/gpu/gpu_test.go b/pkg/collector/corechecks/gpu/gpu_test.go index ea6534ab2f7ea9..890b93edf8c1d0 100644 --- a/pkg/collector/corechecks/gpu/gpu_test.go +++ 
b/pkg/collector/corechecks/gpu/gpu_test.go @@ -10,6 +10,7 @@ package gpu import ( "fmt" "slices" + "strconv" "sync/atomic" "testing" "time" @@ -486,7 +487,7 @@ func TestRunEmitsCorrectTags(t *testing.T) { pid := int32(len(processes)+i) + 1000 process := &workloadmeta.Process{ EntityID: workloadmeta.EntityID{ - ID: fmt.Sprintf("%d", pid), + ID: strconv.Itoa(int(pid)), Kind: workloadmeta.KindProcess, }, Owner: &container.EntityID, @@ -495,7 +496,7 @@ func TestRunEmitsCorrectTags(t *testing.T) { NsPid: pid, } - processTags := []string{"pid:" + fmt.Sprintf("%d", pid), "nspid:" + fmt.Sprintf("%d", pid)} + processTags := []string{"pid:" + strconv.Itoa(int(pid)), "nspid:" + strconv.Itoa(int(pid))} containerTags := []string{"container_id:" + container.EntityID.ID} processes = append(processes, process) diff --git a/pkg/collector/corechecks/gpu/nvidia/device_events.go b/pkg/collector/corechecks/gpu/nvidia/device_events.go index d0e9d8e60ce7d2..de2a5bac9cbe34 100644 --- a/pkg/collector/corechecks/gpu/nvidia/device_events.go +++ b/pkg/collector/corechecks/gpu/nvidia/device_events.go @@ -8,6 +8,7 @@ package nvidia import ( + "errors" "fmt" "maps" "slices" @@ -55,7 +56,7 @@ func newDeviceEventsCollector(device ddnvml.Device, deps *CollectorDependencies) // used internally for testing func newDeviceEventsCollectorWithCache(device ddnvml.Device, cache deviceEventsCollectorCache) (c Collector, err error) { if cache == nil { - return nil, fmt.Errorf("device events gatherer cannot be nil") + return nil, errors.New("device events gatherer cannot be nil") } if supported, err := cache.SupportsDevice(device); err != nil { @@ -305,7 +306,7 @@ func (c *DeviceEventsGatherer) RegisterDevice(device ddnvml.Device) error { c.evtSetMtx.Lock() defer c.evtSetMtx.Unlock() if c.evtSet == nil { - return fmt.Errorf("failed registering device events on stopped gatherer") + return errors.New("failed registering device events on stopped gatherer") } if err := device.RegisterEvents(evtTypes&eventSetMask, 
c.evtSet); err != nil { return fmt.Errorf("failed registering device events: %w", err) diff --git a/pkg/collector/corechecks/gpu/nvidia/ebpf.go b/pkg/collector/corechecks/gpu/nvidia/ebpf.go index 062bb5f6ee6981..7ff13147279ce0 100644 --- a/pkg/collector/corechecks/gpu/nvidia/ebpf.go +++ b/pkg/collector/corechecks/gpu/nvidia/ebpf.go @@ -8,6 +8,7 @@ package nvidia import ( + "errors" "fmt" "strconv" @@ -49,7 +50,7 @@ func NewSystemProbeCache() *SystemProbeCache { // Returns error if the refresh fails, nil on success. func (c *SystemProbeCache) Refresh() error { if c.client == nil { - return fmt.Errorf("system-probe client is nil") + return errors.New("system-probe client is nil") } stats, err := sysprobeclient.GetCheck[model.GPUStats](c.client, sysconfig.GPUMonitoringModule) @@ -93,7 +94,7 @@ type ebpfCollector struct { // newEbpfCollector creates a new eBPF-based collector for the given device. func newEbpfCollector(device ddnvml.Device, cache *SystemProbeCache) (*ebpfCollector, error) { if cache == nil { - return nil, fmt.Errorf("system-probe cache cannot be nil") + return nil, errors.New("system-probe cache cannot be nil") } return &ebpfCollector{ diff --git a/pkg/collector/corechecks/gpu/nvidia/helpers.go b/pkg/collector/corechecks/gpu/nvidia/helpers.go index 9575c12a0993ee..1b84b09fe08dbf 100644 --- a/pkg/collector/corechecks/gpu/nvidia/helpers.go +++ b/pkg/collector/corechecks/gpu/nvidia/helpers.go @@ -130,7 +130,7 @@ func GetDeviceTagsMapping(deviceCache ddnvml.DeviceCache, tagger tagger.Componen if len(tags) == 0 { // If we get no tags (either WMS hasn't collected GPUs yet, or we are running the check standalone with 'agent check') // add at least the UUID as a tag to distinguish the values. 
- tags = []string{fmt.Sprintf("gpu_uuid:%s", uuid)} + tags = []string{"gpu_uuid:" + uuid} } tagsMapping[uuid] = tags diff --git a/pkg/collector/corechecks/gpu/nvidia/stateless.go b/pkg/collector/corechecks/gpu/nvidia/stateless.go index 133930e17826b2..ff74c4c8d574e6 100644 --- a/pkg/collector/corechecks/gpu/nvidia/stateless.go +++ b/pkg/collector/corechecks/gpu/nvidia/stateless.go @@ -359,7 +359,7 @@ func createStatelessAPIs() []apiCallInfo { value = 1.0 } allMetrics = append(allMetrics, Metric{ - Name: fmt.Sprintf("clock.throttle_reasons.%s", reasonName), + Name: "clock.throttle_reasons." + reasonName, Value: value, Type: metrics.GaugeType, }) @@ -415,7 +415,7 @@ func createStatelessAPIs() []apiCallInfo { Value: float64(count), Type: metrics.CountType, Tags: []string{ - fmt.Sprintf("memory_location:%s", memoryLocationName), + "memory_location:" + memoryLocationName, }, }}, 0, nil }, diff --git a/pkg/collector/corechecks/gpu/tags.go b/pkg/collector/corechecks/gpu/tags.go index 5458a84bc9ae33..f71a8596a8bb06 100644 --- a/pkg/collector/corechecks/gpu/tags.go +++ b/pkg/collector/corechecks/gpu/tags.go @@ -280,7 +280,7 @@ func getNsPID(pid int32) (int32, error) { func (c *WorkloadTagCache) getContainerID(pid int32) (string, error) { if c.pidToCid == nil { if c.containerProvider == nil { - return "", fmt.Errorf("no container provider available") + return "", errors.New("no container provider available") } // Get the PID -> CID mapping from the container provider with no cache validity, as we have already failed to hit the diff --git a/pkg/collector/corechecks/gpu/tags_test.go b/pkg/collector/corechecks/gpu/tags_test.go index 4bdc3ae9259ebd..14e75740344307 100644 --- a/pkg/collector/corechecks/gpu/tags_test.go +++ b/pkg/collector/corechecks/gpu/tags_test.go @@ -82,7 +82,7 @@ func newContainerWorkloadID(containerID string) workloadmeta.EntityID { func newProcessWorkloadID(pid int32) workloadmeta.EntityID { return workloadmeta.EntityID{ Kind: workloadmeta.KindProcess, - 
ID: fmt.Sprintf("%d", pid), + ID: strconv.FormatInt(int64(pid), 10), } } @@ -440,7 +440,7 @@ func TestBuildProcessTagsFromWorkloadMetaIncludingContainer(t *testing.T) { process := &workloadmeta.Process{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindProcess, - ID: fmt.Sprintf("%d", pid), + ID: strconv.FormatInt(int64(pid), 10), }, NsPid: nspid, Owner: &container.EntityID, @@ -450,7 +450,7 @@ func TestBuildProcessTagsFromWorkloadMetaIncludingContainer(t *testing.T) { // Set up tagger for container setWorkloadTags(t, mocks.tagger, container.EntityID, containerTags, nil, nil) - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) require.NoError(t, err) expectedTags := []string{ @@ -473,14 +473,14 @@ func TestBuildProcessTagsWithoutContainer(t *testing.T) { process := &workloadmeta.Process{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindProcess, - ID: fmt.Sprintf("%d", pid), + ID: strconv.FormatInt(int64(pid), 10), }, NsPid: nspid, Owner: nil, } mocks.workloadMeta.Set(process) - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) require.NoError(t, err) expectedTags := []string{ @@ -501,14 +501,14 @@ func TestBuildProcessTagsNsPidZero(t *testing.T) { process := &workloadmeta.Process{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindProcess, - ID: fmt.Sprintf("%d", pid), + ID: strconv.FormatInt(int64(pid), 10), }, NsPid: 0, Owner: nil, } mocks.workloadMeta.Set(process) - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) require.NoError(t, err) expectedTags := []string{ @@ -538,7 +538,7 @@ func TestBuildProcessTagsWithNoNsPidField(t *testing.T) { // Ensure the process exists in the fake procfs require.True(t, kernel.ProcessExists(int(pid))) - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + 
tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) require.NoError(t, err) expectedTags := []string{ @@ -576,7 +576,7 @@ func TestBuildProcessTagsFallbackToContainerProvider(t *testing.T) { setWorkloadInWorkloadMeta(t, mocks.workloadMeta, newContainerWorkloadID(containerID), workloadmeta.ContainerRuntimeContainerd) setWorkloadTags(t, mocks.tagger, newContainerWorkloadID(containerID), containerTags, nil, nil) - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) require.NoError(t, err) expectedTags := []string{ @@ -605,7 +605,7 @@ func TestBuildProcessTagsContainerNotFound(t *testing.T) { process := &workloadmeta.Process{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindProcess, - ID: fmt.Sprintf("%d", pid), + ID: strconv.FormatInt(int64(pid), 10), }, NsPid: nspid, Owner: &workloadmeta.EntityID{ @@ -617,7 +617,7 @@ func TestBuildProcessTagsContainerNotFound(t *testing.T) { // Don't set up container in workloadmeta - it will return NotFound - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) assert.NoError(t, err) // expectedTags := []string{ @@ -641,7 +641,7 @@ func TestBuildProcessTagsContainerTagsReturnsEmpty(t *testing.T) { process := &workloadmeta.Process{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindProcess, - ID: fmt.Sprintf("%d", pid), + ID: strconv.FormatInt(int64(pid), 10), }, NsPid: nspid, Owner: &workloadmeta.EntityID{ @@ -655,7 +655,7 @@ func TestBuildProcessTagsContainerTagsReturnsEmpty(t *testing.T) { // Don't set up tagger tags - the fake tagger returns empty tags (no error) - tags, err := cache.buildProcessTags(fmt.Sprintf("%d", pid)) + tags, err := cache.buildProcessTags(strconv.FormatInt(int64(pid), 10)) require.NoError(t, err) // Tags should include process tags but no container tags @@ -877,12 +877,12 @@ func 
TestBuildProcessTagsUsesCachedPidToCid(t *testing.T) { kernel.WithFakeProcFS(t, procRoot) // First process - should initialize pidToCid - tags1, err := cache.buildProcessTags(fmt.Sprintf("%d", pid1)) + tags1, err := cache.buildProcessTags(strconv.FormatInt(int64(pid1), 10)) require.NoError(t, err) assert.Contains(t, tags1, "service:service1") // Second process - should reuse cached pidToCid (mockContainerProvider.EXPECT() will fail if called again) - tags2, err := cache.buildProcessTags(fmt.Sprintf("%d", pid2)) + tags2, err := cache.buildProcessTags(strconv.FormatInt(int64(pid2), 10)) require.NoError(t, err) assert.Contains(t, tags2, "service:service2") } diff --git a/pkg/collector/corechecks/loader_test.go b/pkg/collector/corechecks/loader_test.go index 3fda048387c08b..ab1acaf9f098b1 100644 --- a/pkg/collector/corechecks/loader_test.go +++ b/pkg/collector/corechecks/loader_test.go @@ -7,7 +7,6 @@ package corechecks import ( "errors" - "fmt" "testing" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" @@ -25,7 +24,7 @@ type TestCheck struct { func (c *TestCheck) Configure(_ sender.SenderManager, _ uint64, data integration.Data, _ integration.Data, _ string) error { if string(data) == "err" { - return fmt.Errorf("testError") + return errors.New("testError") } if string(data) == "skip" { return check.ErrSkipCheckInstance diff --git a/pkg/collector/corechecks/longrunning.go b/pkg/collector/corechecks/longrunning.go index 663f48dcbe7bca..8cc2a86b9dc9d9 100644 --- a/pkg/collector/corechecks/longrunning.go +++ b/pkg/collector/corechecks/longrunning.go @@ -6,6 +6,7 @@ package corechecks import ( + "errors" "fmt" "sync" "time" @@ -43,13 +44,13 @@ func NewLongRunningCheckWrapper(check LongRunningCheck) *LongRunningCheckWrapper // If the check is already running, it will commit the sender. 
func (cw *LongRunningCheckWrapper) Run() error { if cw.LongRunningCheck == nil { - return fmt.Errorf("no check defined") + return errors.New("no check defined") } cw.mutex.Lock() defer cw.mutex.Unlock() if cw.stopped { - return fmt.Errorf("check already stopped") + return errors.New("check already stopped") } if cw.running { @@ -83,7 +84,7 @@ func (cw *LongRunningCheckWrapper) Interval() time.Duration { // LongRunningCheck to true. It is necessary for formatting the stats in the status page. func (cw *LongRunningCheckWrapper) GetSenderStats() (stats.SenderStats, error) { if cw.LongRunningCheck == nil { - return stats.SenderStats{}, fmt.Errorf("no check defined") + return stats.SenderStats{}, errors.New("no check defined") } s, err := cw.LongRunningCheck.GetSenderStats() if err != nil { diff --git a/pkg/collector/corechecks/longrunning_test.go b/pkg/collector/corechecks/longrunning_test.go index 6d9b6de19ffaed..7ff2bb5b477053 100644 --- a/pkg/collector/corechecks/longrunning_test.go +++ b/pkg/collector/corechecks/longrunning_test.go @@ -8,7 +8,7 @@ package corechecks import ( - "fmt" + "errors" "testing" "time" @@ -170,7 +170,7 @@ func TestLongRunningCheckWrapperRun(t *testing.T) { t.Run("Returning an error if GetSender fails while already running", func(t *testing.T) { mockCheck := newMockLongRunningCheck() - expectedErr := fmt.Errorf("failed to get sender") + expectedErr := errors.New("failed to get sender") mockCheck.On("GetSender").Return(nil, expectedErr) wrapper := NewLongRunningCheckWrapper(mockCheck) diff --git a/pkg/collector/corechecks/net/network/network.go b/pkg/collector/corechecks/net/network/network.go index e09de67554cf07..999927d219dc86 100644 --- a/pkg/collector/corechecks/net/network/network.go +++ b/pkg/collector/corechecks/net/network/network.go @@ -200,7 +200,7 @@ func (c *NetworkCheck) isDeviceExcluded(deviceName string) bool { } func submitInterfaceMetrics(sender sender.Sender, interfaceIO net.IOCountersStat) { - tags := 
[]string{fmt.Sprintf("device:%s", interfaceIO.Name), fmt.Sprintf("device_name:%s", interfaceIO.Name)} + tags := []string{"device:" + interfaceIO.Name, "device_name:" + interfaceIO.Name} sender.Rate("system.net.bytes_rcvd", float64(interfaceIO.BytesRecv), "", tags) sender.Rate("system.net.bytes_sent", float64(interfaceIO.BytesSent), "", tags) sender.Rate("system.net.packets_in.count", float64(interfaceIO.PacketsRecv), "", tags) @@ -216,7 +216,7 @@ func submitProtocolMetrics(sender sender.Sender, protocolStats net.ProtoCounters for rawMetricName, metricName := range protocolMapping { if metricValue, ok := protocolStats.Stats[rawMetricName]; ok { sender.Rate(metricName, float64(metricValue), "", nil) - sender.MonotonicCount(fmt.Sprintf("%s.count", metricName), float64(metricValue), "", nil) + sender.MonotonicCount(metricName+".count", float64(metricValue), "", nil) } } } diff --git a/pkg/collector/corechecks/net/networkv2/network.go b/pkg/collector/corechecks/net/networkv2/network.go index a66babcff20fc3..cb15a688dd0b03 100644 --- a/pkg/collector/corechecks/net/networkv2/network.go +++ b/pkg/collector/corechecks/net/networkv2/network.go @@ -137,7 +137,7 @@ func (n defaultNetworkStats) GetNetProcBasePath() string { netProcfsPath := n.procPath // in a containerized environment if os.Getenv("DOCKER_DD_AGENT") != "" && netProcfsPath != "/proc" { - netProcfsPath = fmt.Sprintf("%s/1", netProcfsPath) + netProcfsPath = netProcfsPath + "/1" } return netProcfsPath } @@ -227,7 +227,7 @@ func submitInterfaceSysMetrics(sender sender.Sender) { return } for _, iface := range ifaces { - ifaceTag := []string{fmt.Sprintf("iface:%s", iface.Name())} + ifaceTag := []string{"iface:" + iface.Name()} for _, metricName := range sysNetMetrics { metricFileName := metricName if metricName == "up" { @@ -238,7 +238,7 @@ func submitInterfaceSysMetrics(sender sender.Sender) { if err != nil { log.Debugf("Unable to read %s, skipping: %s.", metricFilepath, err) } - 
sender.Gauge(fmt.Sprintf("system.net.iface.%s", metricName), float64(val), "", ifaceTag) + sender.Gauge("system.net.iface."+metricName, float64(val), "", ifaceTag) } queuesFilepath := filepath.Join(sysNetLocation, iface.Name(), "queues") queues, err := afero.ReadDir(filesystem, queuesFilepath) @@ -260,14 +260,14 @@ func submitInterfaceSysMetrics(sender sender.Sender) { } func submitInterfaceMetrics(sender sender.Sender, interfaceIO net.IOCountersStat) { - tags := []string{fmt.Sprintf("device:%s", interfaceIO.Name), fmt.Sprintf("device_name:%s", interfaceIO.Name)} + tags := []string{"device:" + interfaceIO.Name, "device_name:" + interfaceIO.Name} speedVal, err := readIntFile(fmt.Sprintf("/sys/class/net/%s/speed", interfaceIO.Name), filesystem) if err == nil { - tags = append(tags, fmt.Sprintf("speed:%s", strconv.Itoa(speedVal))) + tags = append(tags, "speed:"+strconv.Itoa(speedVal)) } mtuVal, err := readIntFile(fmt.Sprintf("/sys/class/net/%s/mtu", interfaceIO.Name), filesystem) if err == nil { - tags = append(tags, fmt.Sprintf("mtu:%s", strconv.Itoa(mtuVal))) + tags = append(tags, "mtu:"+strconv.Itoa(mtuVal)) } sender.Rate("system.net.bytes_rcvd", float64(interfaceIO.BytesRecv), "", tags) sender.Rate("system.net.bytes_sent", float64(interfaceIO.BytesSent), "", tags) @@ -326,7 +326,7 @@ func handleEthtoolStats(sender sender.Sender, ethtoolObject ethtoolInterface, in count := 0 for metricName, metricValue := range enaMetrics { - metricName := fmt.Sprintf("system.net.%s", metricName) + metricName := "system.net." + metricName sender.Gauge(metricName, float64(metricValue), "", tags) count++ } @@ -344,7 +344,7 @@ func handleEthtoolStats(sender sender.Sender, ethtoolObject ethtoolInterface, in } for metricName, metricValue := range keyValuePairing { - metricName := fmt.Sprintf("system.net.%s", metricName) + metricName := "system.net." 
+ metricName sender.MonotonicCount(metricName, float64(metricValue), "", tags) } } @@ -506,7 +506,7 @@ func (c *NetworkCheck) submitProtocolMetrics(sender sender.Sender, protocolStats sender.Rate(metricName, float64(metricValue), "", nil) } if c.config.instance.CollectCountMetrics { - sender.MonotonicCount(fmt.Sprintf("%s.count", metricName), float64(metricValue), "", nil) + sender.MonotonicCount(metricName+".count", float64(metricValue), "", nil) } } } @@ -529,8 +529,8 @@ func getSocketStateMetrics(protocol string, procfsPath string) (map[string]*conn // Also calls `ss` for each protocol, because on some systems (e.g. Ubuntu 14.04), there is a bug that print `tcp` even if it's `udp` // The `-H` flag isn't available on old versions of `ss`. - ipFlag := fmt.Sprintf("--ipv%s", protocol[len(protocol)-1:]) - protocolFlag := fmt.Sprintf("--%s", protocol[:len(protocol)-1]) + ipFlag := "--ipv" + protocol[len(protocol)-1:] + protocolFlag := "--" + protocol[:len(protocol)-1] // Go's exec.Command environment is the same as the running process unlike python so we do not need to adjust the PATH cmd := fmt.Sprintf("ss --numeric %s --all %s", protocolFlag, ipFlag) output, err := runCommandFunction([]string{"sh", "-c", cmd}, env) diff --git a/pkg/collector/corechecks/net/networkv2/network_windows.go b/pkg/collector/corechecks/net/networkv2/network_windows.go index a5fbcd2f3d023f..1435c0930a9318 100644 --- a/pkg/collector/corechecks/net/networkv2/network_windows.go +++ b/pkg/collector/corechecks/net/networkv2/network_windows.go @@ -276,7 +276,7 @@ func (c *NetworkCheck) submitMetricsFromStruct(sender sender.Sender, metricPrefi sender.Rate(metricName, float64(metricValue), "", nil) } if c.config.instance.CollectCountMetrics { - sender.MonotonicCount(fmt.Sprintf("%s.count", metricName), float64(metricValue), "", nil) + sender.MonotonicCount(metricName+".count", float64(metricValue), "", nil) } } } @@ -293,7 +293,7 @@ func (c *NetworkCheck) isDeviceExcluded(deviceName string) bool { 
} func submitInterfaceMetrics(sender sender.Sender, interfaceIO net.IOCountersStat) { - tags := []string{fmt.Sprintf("device:%s", interfaceIO.Name)} + tags := []string{"device:" + interfaceIO.Name} sender.Rate("system.net.bytes_rcvd", float64(interfaceIO.BytesRecv), "", tags) sender.Rate("system.net.bytes_sent", float64(interfaceIO.BytesSent), "", tags) sender.Rate("system.net.packets_in.count", float64(interfaceIO.PacketsRecv), "", tags) diff --git a/pkg/collector/corechecks/net/ntp/ntp_test.go b/pkg/collector/corechecks/net/ntp/ntp_test.go index 54ced5d537e666..1b05ab43725563 100644 --- a/pkg/collector/corechecks/net/ntp/ntp_test.go +++ b/pkg/collector/corechecks/net/ntp/ntp_test.go @@ -7,6 +7,7 @@ package ntp import ( "context" + "errors" "fmt" "strconv" "testing" @@ -42,7 +43,7 @@ hosts: [ 0.datadog.pool.ntp.org, 1.datadog.pool.ntp.org, 2.datadog.pool.ntp.org, ) func testNTPQueryError(_ string, _ ntp.QueryOptions) (*ntp.Response, error) { - return nil, fmt.Errorf("test error from NTP") + return nil, errors.New("test error from NTP") } func testNTPQueryInvalid(_ string, _ ntp.QueryOptions) (*ntp.Response, error) { diff --git a/pkg/collector/corechecks/net/wlan/wlan_nix.go b/pkg/collector/corechecks/net/wlan/wlan_nix.go index 8beb719b6b1112..324b0efffafdac 100644 --- a/pkg/collector/corechecks/net/wlan/wlan_nix.go +++ b/pkg/collector/corechecks/net/wlan/wlan_nix.go @@ -8,8 +8,8 @@ //nolint:revive // TODO(PLINT) Fix revive linter package wlan -import "fmt" +import "errors" func GetWiFiInfo() (wifiInfo, error) { - return wifiInfo{}, fmt.Errorf("wifi info only supported on macOS and Windows") + return wifiInfo{}, errors.New("wifi info only supported on macOS and Windows") } diff --git a/pkg/collector/corechecks/net/wlan/wlan_windows.go b/pkg/collector/corechecks/net/wlan/wlan_windows.go index bbb607dc03dc4e..40fc6d9d0764d0 100644 --- a/pkg/collector/corechecks/net/wlan/wlan_windows.go +++ b/pkg/collector/corechecks/net/wlan/wlan_windows.go @@ -9,6 +9,7 @@ package 
wlan import ( + "errors" "fmt" "strings" "unsafe" @@ -416,7 +417,7 @@ func strToGUID(guidStr string) (*windows.GUID, error) { var guid windows.GUID ret, _, _ := clsidFromString.Call(uintptr(unsafe.Pointer(guidWStr)), uintptr(unsafe.Pointer(&guid))) if ret != 0 { - return nil, fmt.Errorf("clsidFromString failed") + return nil, errors.New("clsidFromString failed") } return &guid, nil diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/auth.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/auth.go index 0511c5caa49fab..888d75f7989980 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/auth.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/auth.go @@ -6,6 +6,7 @@ package client import ( + "errors" "fmt" "io" "net/http" @@ -44,7 +45,7 @@ func (client *Client) login() error { } if len(bodyBytes) > 0 { - return fmt.Errorf("invalid credentials") + return errors.New("invalid credentials") } // Request to /dataservice/client/token to obtain csrf prevention token @@ -65,7 +66,7 @@ func (client *Client) login() error { token, _ := io.ReadAll(tokenRes.Body) if string(token) == "" { - return fmt.Errorf("no csrf prevention token in payload") + return errors.New("no csrf prevention token in payload") } client.token = string(token) diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go index 636a599ead969a..66fa73dacb40a0 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/client.go @@ -9,11 +9,12 @@ package client import ( "crypto/tls" "crypto/x509" - "fmt" + "errors" "net/http" "net/http/cookiejar" "net/url" "os" + "strconv" "sync" "time" ) @@ -98,13 +99,13 @@ func NewClient(endpoint, username, password string, useHTTP bool, options ...Cli func validateParams(endpoint, username, password string) error { if 
endpoint == "" { - return fmt.Errorf("invalid endpoint") + return errors.New("invalid endpoint") } if username == "" { - return fmt.Errorf("invalid username") + return errors.New("invalid username") } if password == "" { - return fmt.Errorf("invalid password") + return errors.New("invalid password") } return nil } @@ -150,7 +151,7 @@ func WithMaxAttempts(maxAttempts int) ClientOptions { // WithMaxCount is a functional option to set the client max count func WithMaxCount(maxCount int) ClientOptions { return func(c *Client) { - c.maxCount = fmt.Sprintf("%d", maxCount) + c.maxCount = strconv.Itoa(maxCount) } } diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/request.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/request.go index 518caf0aa209c6..01e44af7583006 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/client/request.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/client/request.go @@ -7,6 +7,7 @@ package client import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -114,7 +115,7 @@ func getMoreEntries[T Content](client *Client, endpoint string, pageInfo PageInf for page := 0; currentPageInfo.MoreEntries || currentPageInfo.HasMoreData; page++ { // Error if max number of pages is reached if page >= client.maxPages { - return nil, fmt.Errorf("max number of page reached, increase API count or max number of pages") + return nil, errors.New("max number of page reached, increase API count or max number of pages") } log.Tracef("Getting page %d from endpoint %s", page+1+1, endpoint) @@ -151,7 +152,7 @@ func getNextPaginationParams(info PageInfo, count string) (map[string]string, er newParams["scrollId"] = info.ScrollID return newParams, nil } - return nil, fmt.Errorf("could not build next page params") + return nil, errors.New("could not build next page params") } // getAllEntries gets all entries from paginated endpoints diff --git 
a/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/cedge_interface.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/cedge_interface.go index ab6ac11187aee1..ad058ddfd4de09 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/cedge_interface.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/cedge_interface.go @@ -6,6 +6,7 @@ package payload import ( + "errors" "fmt" "net" "strconv" @@ -84,7 +85,7 @@ func (itf *CEdgeInterface) Metadata(namespace string) (devicemetadata.InterfaceM return devicemetadata.InterfaceMetadata{ DeviceID: fmt.Sprintf("%s:%s", namespace, itf.VmanageSystemIP), // VmanageSystemIP is the device's System IP from vManage - IDTags: []string{fmt.Sprintf("interface:%s", itf.Ifname)}, + IDTags: []string{"interface:" + itf.Ifname}, Index: index, Name: itf.Ifname, Description: itf.Description, @@ -151,7 +152,7 @@ func isEmptyCEdgeIP(ip string) bool { func parseCEdgeIP(ip string) (string, error) { ipAddr := net.ParseIP(ip) if ipAddr == nil || ipAddr.IsUnspecified() { - return "", fmt.Errorf("invalid ip address") + return "", errors.New("invalid ip address") } return ipAddr.String(), nil } @@ -159,7 +160,7 @@ func parseCEdgeIP(ip string) (string, error) { func parseMask(mask string) (int32, error) { ipMask := net.ParseIP(mask) if ipMask == nil { - return 0, fmt.Errorf("invalid mask") + return 0, errors.New("invalid mask") } parsedMask := net.IPMask(ipMask.To4()) prefixLen, _ := parsedMask.Size() diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/vedge_interface.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/vedge_interface.go index 7f8d5e34635824..ae5e78853e197f 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/vedge_interface.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/payload/vedge_interface.go @@ -6,6 +6,7 @@ package payload import ( + "errors" "fmt" "net" "strconv" @@ -64,7 +65,7 @@ func (itf 
*VEdgeInterface) AdminStatus() devicemetadata.IfAdminStatus { func (itf *VEdgeInterface) Metadata(namespace string) (devicemetadata.InterfaceMetadata, error) { return devicemetadata.InterfaceMetadata{ DeviceID: fmt.Sprintf("%s:%s", namespace, itf.VmanageSystemIP), // VmanageSystemIP is the device's System IP from vManage - IDTags: []string{fmt.Sprintf("interface:%s", itf.Ifname)}, + IDTags: []string{"interface:" + itf.Ifname}, Index: int32(itf.Ifindex), Name: itf.Ifname, Description: itf.Desc, @@ -108,7 +109,7 @@ func parseVEdgeIP(ip string) (string, int32, error) { } if ipaddr.IsUnspecified() { - return "", 0, fmt.Errorf("IP address is unspecified") + return "", 0, errors.New("IP address is unspecified") } prefixLen, _ := ipv4Net.Mask.Size() diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/report/metrics.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/report/metrics.go index 42962454f52ed8..bc63baa5be969c 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/report/metrics.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/report/metrics.go @@ -245,7 +245,7 @@ func (ms *SDWanSender) SendDeviceStatusMetrics(deviceStatus map[string]float64) // SendHardwareMetrics sends hardware metrics func (ms *SDWanSender) SendHardwareMetrics(hardwareEnvironments []client.HardwareEnvironment) { for _, entry := range hardwareEnvironments { - devIndex := fmt.Sprintf("%d", entry.HwDevIndex) + devIndex := strconv.Itoa(entry.HwDevIndex) tags := ms.getDeviceTags(entry.VmanageSystemIP) tags = append(tags, "status:"+entry.Status, "class:"+entry.HwClass, "item:"+entry.HwItem, "dev_index:"+devIndex) @@ -267,7 +267,7 @@ func (ms *SDWanSender) SendCloudApplicationMetrics(cloudApplications []client.Cl gatewayTags := ms.getPrefixedDeviceTags("gateway_", entry.GatewaySystemIP) tags = append(tags, gatewayTags...) 
- tags = append(tags, "local_color:"+entry.LocalColor, "remote_color:"+entry.RemoteColor, "interface:"+entry.Interface, "exit_type:"+entry.ExitType, "application_group:"+entry.NbarAppGroupName, "application:"+entry.Application, "best_path:"+entry.BestPath, "vpn_id:"+fmt.Sprintf("%d", int(entry.VpnID))) + tags = append(tags, "local_color:"+entry.LocalColor, "remote_color:"+entry.RemoteColor, "interface:"+entry.Interface, "exit_type:"+entry.ExitType, "application_group:"+entry.NbarAppGroupName, "application:"+entry.Application, "best_path:"+entry.BestPath, "vpn_id:"+strconv.Itoa(int(entry.VpnID))) key := ms.getMetricKey("application_metrics", entry.VmanageSystemIP, entry.GatewaySystemIP, entry.LocalColor, entry.RemoteColor, entry.Application) if !ms.shouldSendEntry(key, entry.EntryTime) { @@ -293,8 +293,8 @@ func (ms *SDWanSender) SendCloudApplicationMetrics(cloudApplications []client.Cl // SendBGPNeighborMetrics sends hardware metrics func (ms *SDWanSender) SendBGPNeighborMetrics(bgpNeighbors []client.BGPNeighbor) { for _, entry := range bgpNeighbors { - as := fmt.Sprintf("%d", int(entry.AS)) - vpnID := fmt.Sprintf("%d", int(entry.VpnID)) + as := strconv.Itoa(int(entry.AS)) + vpnID := strconv.Itoa(int(entry.VpnID)) tags := ms.getDeviceTags(entry.VmanageSystemIP) tags = append(tags, "peer_state:"+entry.State, "remote_as:"+as, "neighbor:"+entry.PeerAddr, "vpn_id:"+vpnID, "afi:"+entry.Afi) diff --git a/pkg/collector/corechecks/network-devices/versa/client/auth.go b/pkg/collector/corechecks/network-devices/versa/client/auth.go index 30266f7aabaf6d..0071cb438c0a92 100644 --- a/pkg/collector/corechecks/network-devices/versa/client/auth.go +++ b/pkg/collector/corechecks/network-devices/versa/client/auth.go @@ -8,6 +8,7 @@ package client import ( "bytes" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -76,10 +77,10 @@ func (a *authMethod) Parse(authString string) error { // processAuthConfig validates and parses the authentication configuration func 
processAuthConfig(config AuthConfig) (authMethod, error) { if config.Username == "" { - return "", fmt.Errorf("username is required") + return "", errors.New("username is required") } if config.Password == "" { - return "", fmt.Errorf("password is required") + return "", errors.New("password is required") } // Parse and validate the auth method (if provided) @@ -94,7 +95,7 @@ func processAuthConfig(config AuthConfig) (authMethod, error) { // Validate OAuth specific requirements if authMethod == authMethodOAuth { if config.ClientID == "" || config.ClientSecret == "" { - return "", fmt.Errorf("client_id and client_secret are required for OAuth authentication") + return "", errors.New("client_id and client_secret are required for OAuth authentication") } } diff --git a/pkg/collector/corechecks/network-devices/versa/client/client.go b/pkg/collector/corechecks/network-devices/versa/client/client.go index 60ed12b2f79fd6..b47660bab179ae 100644 --- a/pkg/collector/corechecks/network-devices/versa/client/client.go +++ b/pkg/collector/corechecks/network-devices/versa/client/client.go @@ -136,13 +136,13 @@ func NewClient(directorEndpoint string, directorPort int, analyticsEndpoint stri func validateParams(directorEndpoint string, directorPort int, analyticsEndpoint string) error { if directorEndpoint == "" { - return fmt.Errorf("invalid director endpoint") + return errors.New("invalid director endpoint") } if directorPort < 0 { return fmt.Errorf("invalid director port: %d", directorPort) } if analyticsEndpoint == "" { - return fmt.Errorf("invalid analytics endpoint") + return errors.New("invalid analytics endpoint") } return nil } @@ -188,7 +188,7 @@ func WithMaxAttempts(maxAttempts int) ClientOptions { // WithMaxCount is a functional option to set the client max count func WithMaxCount(maxCount int) ClientOptions { return func(c *Client) { - c.maxCount = fmt.Sprintf("%d", maxCount) + c.maxCount = strconv.Itoa(maxCount) } } @@ -262,7 +262,7 @@ func (client *Client) 
GetChildAppliancesDetail(tenant string) ([]Appliance, erro totalPages := (*totalCount + maxCount - 1) / maxCount // calculate total pages, rounding up if there's any remainder for i := 0; i < totalPages; i++ { params["fetch"] = "all" - params["offset"] = fmt.Sprintf("%d", i*maxCount) + params["offset"] = strconv.Itoa(i * maxCount) resp, err := get[[]Appliance](client, uri, params, false) if err != nil { return nil, fmt.Errorf("failed to get appliance detail response: %v", err) @@ -330,7 +330,7 @@ func (client *Client) GetAppliances() ([]Appliance, error) { // GetInterfaces retrieves a list of interfaces for a specific tenant func (client *Client) GetInterfaces(tenantName string) ([]Interface, error) { if tenantName == "" { - return nil, fmt.Errorf("tenantName cannot be empty") + return nil, errors.New("tenantName cannot be empty") } params := map[string]string{ @@ -351,10 +351,10 @@ func (client *Client) GetInterfaces(tenantName string) ([]Interface, error) { // GetInterfaceMetrics retrieves interface metrics for a specific appliance and tenant using pagination func (client *Client) GetInterfaceMetrics(applianceName string, tenantName string) ([]InterfaceMetrics, error) { if applianceName == "" { - return nil, fmt.Errorf("applianceName cannot be empty") + return nil, errors.New("applianceName cannot be empty") } if tenantName == "" { - return nil, fmt.Errorf("tenantName cannot be empty") + return nil, errors.New("tenantName cannot be empty") } var allMetrics []InterfaceMetrics @@ -604,7 +604,7 @@ func (client *Client) GetTopUsers(tenant string) ([]TopUserMetrics, error) { // GetTunnelMetrics retrieves tunnel metrics from the Versa Analytics API func (client *Client) GetTunnelMetrics(tenant string) ([]TunnelMetrics, error) { if tenant == "" { - return nil, fmt.Errorf("tenant cannot be empty") + return nil, errors.New("tenant cannot be empty") } return getPaginatedAnalytics( @@ -626,7 +626,7 @@ func (client *Client) GetTunnelMetrics(tenant string) ([]TunnelMetrics, 
error) { // GetDIAMetrics retrieves DIA (Direct Internet Access) metrics from the Versa Analytics API func (client *Client) GetDIAMetrics(tenant string) ([]DIAMetrics, error) { if tenant == "" { - return nil, fmt.Errorf("tenant cannot be empty") + return nil, errors.New("tenant cannot be empty") } return getPaginatedAnalytics( @@ -650,7 +650,7 @@ func (client *Client) GetDIAMetrics(tenant string) ([]DIAMetrics, error) { // GetAnalyticsInterfaces retrieves interface utilization metrics from the Versa Analytics API func (client *Client) GetAnalyticsInterfaces(tenant string) ([]AnalyticsInterfaceMetrics, error) { if tenant == "" { - return nil, fmt.Errorf("tenant cannot be empty") + return nil, errors.New("tenant cannot be empty") } return getPaginatedAnalytics( diff --git a/pkg/collector/corechecks/network-devices/versa/client/parser.go b/pkg/collector/corechecks/network-devices/versa/client/parser.go index f52bcbfc9f327e..3c0811c339be43 100644 --- a/pkg/collector/corechecks/network-devices/versa/client/parser.go +++ b/pkg/collector/corechecks/network-devices/versa/client/parser.go @@ -7,6 +7,7 @@ package client import ( + "errors" "fmt" ) @@ -21,22 +22,22 @@ func parseSLAMetrics(data [][]interface{}) ([]SLAMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for CombinedKey") + return nil, errors.New("expected string for CombinedKey") } if m.LocalSite, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for LocalSite") + return nil, errors.New("expected string for LocalSite") } if m.RemoteSite, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for RemoteSite") + return nil, errors.New("expected string for RemoteSite") } if m.LocalAccessCircuit, ok = row[3].(string); !ok { - return nil, fmt.Errorf("expected string for LocalCircuit") + return nil, errors.New("expected string for LocalCircuit") } if m.RemoteAccessCircuit, ok = 
row[4].(string); !ok { - return nil, fmt.Errorf("expected string for RemoteCircuit") + return nil, errors.New("expected string for RemoteCircuit") } if m.ForwardingClass, ok = row[5].(string); !ok { - return nil, fmt.Errorf("expected string for ForwardingClass") + return nil, errors.New("expected string for ForwardingClass") } // Floats from index 6–11 @@ -67,13 +68,13 @@ func parseApplicationsByApplianceMetrics(data [][]interface{}) ([]ApplicationsBy // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Site, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.AppID, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for AppId") + return nil, errors.New("expected string for AppId") } // Floats from index 3–8 @@ -104,16 +105,16 @@ func parseLinkStatusMetrics(data [][]interface{}) ([]LinkStatusMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Site, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.AccessCircuit, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for AccessCircuit") + return nil, errors.New("expected string for AccessCircuit") } if m.Availability, ok = row[3].(float64); !ok { - return nil, fmt.Errorf("expected float64 for Availability") + return nil, errors.New("expected float64 for Availability") } rows = append(rows, m) } @@ -131,31 +132,31 @@ func parseLinkUsageMetrics(data [][]interface{}) ([]LinkUsageMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok 
{ - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Site, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.AccessCircuit, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for AccessCircuit") + return nil, errors.New("expected string for AccessCircuit") } if m.UplinkBandwidth, ok = row[3].(string); !ok { - return nil, fmt.Errorf("expected string for UplinkBandwidth") + return nil, errors.New("expected string for UplinkBandwidth") } if m.DownlinkBandwidth, ok = row[4].(string); !ok { - return nil, fmt.Errorf("expected string for DownlinkBandwidth") + return nil, errors.New("expected string for DownlinkBandwidth") } if m.Type, ok = row[5].(string); !ok { - return nil, fmt.Errorf("expected string for Type") + return nil, errors.New("expected string for Type") } if m.Media, ok = row[6].(string); !ok { - return nil, fmt.Errorf("expected string for Media") + return nil, errors.New("expected string for Media") } if m.IP, ok = row[7].(string); !ok { - return nil, fmt.Errorf("expected string for IP") + return nil, errors.New("expected string for IP") } if m.ISP, ok = row[8].(string); !ok { - return nil, fmt.Errorf("expected string for ISP") + return nil, errors.New("expected string for ISP") } // Floats from index 9–12 @@ -185,19 +186,19 @@ func parseSiteMetrics(data [][]interface{}) ([]SiteMetrics, error) { // Type assertions for each value var ok bool if m.Site, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.Address, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Address") + return nil, errors.New("expected string for Address") } if m.Latitude, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for Latitude") + return nil, errors.New("expected string for 
Latitude") } if m.Longitude, ok = row[3].(string); !ok { - return nil, fmt.Errorf("expected string for Longitude") + return nil, errors.New("expected string for Longitude") } if m.LocationSource, ok = row[4].(string); !ok { - return nil, fmt.Errorf("expected string for LocationSource") + return nil, errors.New("expected string for LocationSource") } // Floats from index 5–9 (5 float fields) @@ -228,13 +229,13 @@ func parseTopUserMetrics(data [][]interface{}) ([]TopUserMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Site, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.User, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for User") + return nil, errors.New("expected string for User") } // Floats from index 3–8 @@ -266,31 +267,31 @@ func parseTunnelMetrics(data [][]interface{}) ([]TunnelMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Appliance, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Appliance") + return nil, errors.New("expected string for Appliance") } if m.LocalIP, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for LocalIP") + return nil, errors.New("expected string for LocalIP") } if m.RemoteIP, ok = row[3].(string); !ok { - return nil, fmt.Errorf("expected string for RemoteIP") + return nil, errors.New("expected string for RemoteIP") } if m.VpnProfName, ok = row[4].(string); !ok { - return nil, fmt.Errorf("expected string for VpnProfName") + return nil, errors.New("expected string for VpnProfName") } // Handle float metrics from indices 
5-6 if val, ok := row[5].(float64); ok { m.VolumeRx = val } else { - return nil, fmt.Errorf("expected float64 for VolumeRx at index 5") + return nil, errors.New("expected float64 for VolumeRx at index 5") } if val, ok := row[6].(float64); ok { m.VolumeTx = val } else { - return nil, fmt.Errorf("expected float64 for VolumeTx at index 6") + return nil, errors.New("expected float64 for VolumeTx at index 6") } rows = append(rows, m) } @@ -308,13 +309,13 @@ func parsePathQoSMetrics(data [][]interface{}) ([]QoSMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.LocalSiteName, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for LocalSiteName") + return nil, errors.New("expected string for LocalSiteName") } if m.RemoteSiteName, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for RemoteSiteName") + return nil, errors.New("expected string for RemoteSiteName") } // Floats from index 3–18 (16 float fields) @@ -347,16 +348,16 @@ func parseDIAMetrics(data [][]interface{}) ([]DIAMetrics, error) { // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Site, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.AccessCircuit, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for AccessCircuit") + return nil, errors.New("expected string for AccessCircuit") } if m.IP, ok = row[3].(string); !ok { - return nil, fmt.Errorf("expected string for IP") + return nil, errors.New("expected string for IP") } // Floats from index 4–7 (4 float fields) @@ -386,16 +387,16 @@ func parseAnalyticsInterfaceMetrics(data 
[][]interface{}) ([]AnalyticsInterfaceM // Type assertions for each value var ok bool if m.DrillKey, ok = row[0].(string); !ok { - return nil, fmt.Errorf("expected string for DrillKey") + return nil, errors.New("expected string for DrillKey") } if m.Site, ok = row[1].(string); !ok { - return nil, fmt.Errorf("expected string for Site") + return nil, errors.New("expected string for Site") } if m.AccessCkt, ok = row[2].(string); !ok { - return nil, fmt.Errorf("expected string for AccessCkt") + return nil, errors.New("expected string for AccessCkt") } if m.Interface, ok = row[3].(string); !ok { - return nil, fmt.Errorf("expected string for Interface") + return nil, errors.New("expected string for Interface") } // Floats from index 4–11 (8 float fields) diff --git a/pkg/collector/corechecks/network-devices/versa/client/request_test.go b/pkg/collector/corechecks/network-devices/versa/client/request_test.go index a12833f7278106..015daa4103012f 100644 --- a/pkg/collector/corechecks/network-devices/versa/client/request_test.go +++ b/pkg/collector/corechecks/network-devices/versa/client/request_test.go @@ -8,6 +8,7 @@ package client import ( + "errors" "fmt" "net/http" "net/http/httptest" @@ -33,12 +34,12 @@ func parseTestMetrics(data [][]interface{}) ([]TestMetric, error) { id, ok := row[0].(string) if !ok { - return nil, fmt.Errorf("expected string for ID") + return nil, errors.New("expected string for ID") } value, ok := row[1].(float64) if !ok { - return nil, fmt.Errorf("expected float64 for Value") + return nil, errors.New("expected float64 for Value") } metrics = append(metrics, TestMetric{ID: id, Value: value}) diff --git a/pkg/collector/corechecks/network-devices/versa/client/types.go b/pkg/collector/corechecks/network-devices/versa/client/types.go index c37d50e8722dde..121d1c1b9d49d0 100644 --- a/pkg/collector/corechecks/network-devices/versa/client/types.go +++ b/pkg/collector/corechecks/network-devices/versa/client/types.go @@ -6,7 +6,7 @@ // Package client 
implements a Versa API client package client -import "fmt" +import "errors" // Content encapsulates the content types of the Versa API type Content interface { @@ -357,7 +357,7 @@ func (d *DirectorStatus) IPAddress() (string, error) { return d.HAConfig.MyAddress, nil } if len(d.HAConfig.MyVnfManagementIPs) == 0 { - return "", fmt.Errorf("no management IPs found for director") + return "", errors.New("no management IPs found for director") } return d.HAConfig.MyVnfManagementIPs[0], nil } diff --git a/pkg/collector/corechecks/network-devices/versa/report/sender.go b/pkg/collector/corechecks/network-devices/versa/report/sender.go index 74b0c40c043427..a695d3b881d4b2 100644 --- a/pkg/collector/corechecks/network-devices/versa/report/sender.go +++ b/pkg/collector/corechecks/network-devices/versa/report/sender.go @@ -7,6 +7,7 @@ package report import ( + "errors" "fmt" "math" "regexp" @@ -148,11 +149,11 @@ func (s *Sender) SendUptimeMetrics(uptimes map[string]float64) { func (s *Sender) SendSLAMetrics(slaMetrics []client.SLAMetrics, deviceNameToIDMap map[string]string) { for _, slaMetricsResponse := range slaMetrics { var tags = []string{ - fmt.Sprintf("local_site:%s", slaMetricsResponse.LocalSite), - fmt.Sprintf("remote_site:%s", slaMetricsResponse.RemoteSite), - fmt.Sprintf("local_access_circuit:%s", slaMetricsResponse.LocalAccessCircuit), - fmt.Sprintf("remote_access_circuit:%s", slaMetricsResponse.RemoteAccessCircuit), - fmt.Sprintf("forwarding_class:%s", slaMetricsResponse.ForwardingClass), + "local_site:" + slaMetricsResponse.LocalSite, + "remote_site:" + slaMetricsResponse.RemoteSite, + "local_access_circuit:" + slaMetricsResponse.LocalAccessCircuit, + "remote_access_circuit:" + slaMetricsResponse.RemoteAccessCircuit, + "forwarding_class:" + slaMetricsResponse.ForwardingClass, } if deviceIP, ok := deviceNameToIDMap[slaMetricsResponse.LocalSite]; ok { tags = append(tags, s.GetDeviceTags(defaultIPTag, deviceIP)...) 
@@ -696,7 +697,7 @@ func parseDiskUsage(diskUsage string) ([]partition, error) { // parseMetricValue parses a string metric value to float64 func parseMetricValue(value string) (float64, error) { if value == "" { - return 0, fmt.Errorf("empty metric value") + return 0, errors.New("empty metric value") } return strconv.ParseFloat(value, 64) } diff --git a/pkg/collector/corechecks/networkconfigmanagement/networkconfigmanagement_test.go b/pkg/collector/corechecks/networkconfigmanagement/networkconfigmanagement_test.go index 786c21c90c4b38..d51400411f2e71 100644 --- a/pkg/collector/corechecks/networkconfigmanagement/networkconfigmanagement_test.go +++ b/pkg/collector/corechecks/networkconfigmanagement/networkconfigmanagement_test.go @@ -9,6 +9,7 @@ package networkconfigmanagement import ( "encoding/json" + "errors" "fmt" "regexp" "testing" @@ -300,7 +301,7 @@ func TestCheck_Run_ConnectionFailure(t *testing.T) { require.NoError(t, err) // Set up mock remote client factory that fails to connect - connectionError := fmt.Errorf("connection refused") + connectionError := errors.New("connection refused") client := newMockRemoteClient() client.ConnectionError = connectionError @@ -322,7 +323,7 @@ func TestCheck_Run_ConfigRetrievalFailure(t *testing.T) { // Set up a mock remote client that fails config retrieval mockClient := &MockRemoteClient{ - ConfigError: fmt.Errorf("command execution failed"), + ConfigError: errors.New("command execution failed"), } check.remoteClient = mockClient @@ -433,7 +434,7 @@ func getRunningScrubber() *scrubber.Scrubber { sc := scrubber.New() sc.AddReplacer(scrubber.SingleLine, scrubber.Replacer{ Regex: regexp.MustCompile(`(username .+ (password|secret) \d) .+`), - Repl: []byte(fmt.Sprintf(`$1 %s`, "")), + Repl: []byte("$1 "), }) return sc } diff --git a/pkg/collector/corechecks/networkpath/config.go b/pkg/collector/corechecks/networkpath/config.go index 15cd7338819948..60c01528e2540c 100644 --- 
a/pkg/collector/corechecks/networkpath/config.go +++ b/pkg/collector/corechecks/networkpath/config.go @@ -6,6 +6,7 @@ package networkpath import ( + "errors" "fmt" "strings" "time" @@ -103,7 +104,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data // hostname validation is done by the datadog-traceroute library but an empty hostname results in querying system-probe with an invalid URL if instance.DestHostname == "" { - return nil, fmt.Errorf("invalid instance config, hostname must be provided") + return nil, errors.New("invalid instance config, hostname must be provided") } c := &CheckConfig{} @@ -123,7 +124,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data defaultCheckInterval, ) if c.MinCollectionInterval <= 0 { - return nil, fmt.Errorf("min collection interval must be > 0") + return nil, errors.New("min collection interval must be > 0") } c.Timeout = firstNonZero( @@ -132,7 +133,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data setup.DefaultNetworkPathTimeout*time.Millisecond, ) if c.Timeout <= 0 { - return nil, fmt.Errorf("timeout must be > 0") + return nil, errors.New("timeout must be > 0") } c.MaxTTL = firstNonZero( diff --git a/pkg/collector/corechecks/nvidia/jetson/powerMetricsSender.go b/pkg/collector/corechecks/nvidia/jetson/powerMetricsSender.go index 9670ee2a37840f..da303a4794cd29 100644 --- a/pkg/collector/corechecks/nvidia/jetson/powerMetricsSender.go +++ b/pkg/collector/corechecks/nvidia/jetson/powerMetricsSender.go @@ -9,7 +9,6 @@ package nvidia import ( "errors" - "fmt" "regexp" "strconv" @@ -38,7 +37,7 @@ func (voltageMetricsSender *voltageMetricsSender) SendMetrics(sender sender.Send } for i := 0; i < len(voltageFields); i++ { - voltageProbeTags := []string{fmt.Sprintf("probe:%s", voltageFields[i][regexSubexpIndex(r, "voltageProbeName")])} + voltageProbeTags := []string{"probe:" + voltageFields[i][regexSubexpIndex(r, "voltageProbeName")]} 
instantVoltage, err := strconv.ParseFloat(voltageFields[i][regexSubexpIndex(r, "currentVoltage")], 64) if err != nil { return err diff --git a/pkg/collector/corechecks/nvidia/jetson/temperatureMetricsSender.go b/pkg/collector/corechecks/nvidia/jetson/temperatureMetricsSender.go index fb3490c081ff05..ae6ad8e5374f90 100644 --- a/pkg/collector/corechecks/nvidia/jetson/temperatureMetricsSender.go +++ b/pkg/collector/corechecks/nvidia/jetson/temperatureMetricsSender.go @@ -9,7 +9,6 @@ package nvidia import ( "errors" - "fmt" "regexp" "strconv" @@ -42,7 +41,7 @@ func (temperatureMetricsSender *temperatureMetricsSender) SendMetrics(sender sen if err != nil { return err } - temperatureZoneTags := []string{fmt.Sprintf("zone:%s", temperatureFields[i][regexSubexpIndex(r, "tempZone")])} + temperatureZoneTags := []string{"zone:" + temperatureFields[i][regexSubexpIndex(r, "tempZone")]} sender.Gauge("nvidia.jetson.temp", tempValue, "", temperatureZoneTags) } diff --git a/pkg/collector/corechecks/oracle/activity.go b/pkg/collector/corechecks/oracle/activity.go index 2462b005682df2..fad2bba1f07145 100644 --- a/pkg/collector/corechecks/oracle/activity.go +++ b/pkg/collector/corechecks/oracle/activity.go @@ -334,12 +334,12 @@ AND status = 'ACTIVE'`) } } else { if (sample.OpFlags & 128) == 128 { - statement = fmt.Sprintf("%s IN HARD PARSE", statement) + statement = statement + " IN HARD PARSE" } else if (sample.OpFlags & 16) == 16 { - statement = fmt.Sprintf("%s IN PARSE", statement) + statement = statement + " IN PARSE" } if (sample.OpFlags & 65536) == 65536 { - statement = fmt.Sprintf("%s IN CURSOR CLOSING", statement) + statement = statement + " IN CURSOR CLOSING" } } diff --git a/pkg/collector/corechecks/oracle/asm_diskgroups.go b/pkg/collector/corechecks/oracle/asm_diskgroups.go index 12f339fcfef1c9..21bf81d1e8a86b 100644 --- a/pkg/collector/corechecks/oracle/asm_diskgroups.go +++ b/pkg/collector/corechecks/oracle/asm_diskgroups.go @@ -36,9 +36,9 @@ func (c *Check) 
asmDiskgroups() error { for _, r := range rows { tags := append(c.tags, "asm_diskgroup_name:"+r.DiskgroupName) tags = append(tags, "state:"+r.State) - sendMetric(c, gauge, fmt.Sprintf("%s.asm_diskgroup.free_mb", common.IntegrationName), r.Free, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.asm_diskgroup.total_mb", common.IntegrationName), r.Total, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.asm_diskgroup.offline_disks", common.IntegrationName), float64(r.OfflineDisks), tags) + sendMetric(c, gauge, common.IntegrationName+".asm_diskgroup.free_mb", r.Free, tags) + sendMetric(c, gauge, common.IntegrationName+".asm_diskgroup.total_mb", r.Total, tags) + sendMetric(c, gauge, common.IntegrationName+".asm_diskgroup.offline_disks", float64(r.OfflineDisks), tags) } sender.Commit() return nil diff --git a/pkg/collector/corechecks/oracle/config/config.go b/pkg/collector/corechecks/oracle/config/config.go index c1631e97b4f14a..003fc2d45740a4 100644 --- a/pkg/collector/corechecks/oracle/config/config.go +++ b/pkg/collector/corechecks/oracle/config/config.go @@ -10,6 +10,7 @@ package config import ( "context" + "errors" "fmt" "strconv" "strings" @@ -311,7 +312,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data instance.Username = instance.User warnDeprecated("user", "username") } else { - return nil, fmt.Errorf("`username` is not configured") + return nil, errors.New("`username` is not configured") } } @@ -338,7 +339,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data service = initCfg.Service } if service != "" { - instance.Tags = append(instance.Tags, fmt.Sprintf("service:%s", service)) + instance.Tags = append(instance.Tags, "service:"+service) } if shouldPropagateAgentTags(instance.PropagateAgentTags, initCfg.PropagateAgentTags) { @@ -359,7 +360,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data // GetLogPrompt returns a config based prompt func GetLogPrompt(c 
InstanceConfig) string { - return fmt.Sprintf("%s>", GetConnectData(c)) + return GetConnectData(c) + ">" } // GetConnectData returns the connection configuration diff --git a/pkg/collector/corechecks/oracle/connection_handling.go b/pkg/collector/corechecks/oracle/connection_handling.go index 95c4e241fad860..b97c402a824831 100644 --- a/pkg/collector/corechecks/oracle/connection_handling.go +++ b/pkg/collector/corechecks/oracle/connection_handling.go @@ -43,7 +43,7 @@ func (c *Check) Connect() (*sqlx.DB, error) { if c.config.Protocol == "TCPS" { protocolString = "tcps://" if c.config.Wallet != "" { - walletString = fmt.Sprintf("?wallet_location=%s", c.config.Wallet) + walletString = "?wallet_location=" + c.config.Wallet } } connStr = fmt.Sprintf(`user="%s" password="%s" connectString="%s%s:%d/%s%s"`, c.config.Username, c.config.Password, protocolString, c.config.Server, c.config.Port, c.config.ServiceName, walletString) diff --git a/pkg/collector/corechecks/oracle/custom_queries.go b/pkg/collector/corechecks/oracle/custom_queries.go index ce11fd8ca7aa66..2481a0c9430cba 100644 --- a/pkg/collector/corechecks/oracle/custom_queries.go +++ b/pkg/collector/corechecks/oracle/custom_queries.go @@ -8,6 +8,7 @@ package oracle import ( + "errors" "fmt" "reflect" "strconv" @@ -50,7 +51,7 @@ func (c *Check) CustomQueries() error { return err } if db == nil { - return fmt.Errorf("empty connection") + return errors.New("empty connection") } c.dbCustomQueries = db } @@ -93,7 +94,7 @@ func (c *Check) CustomQueries() error { if pdb == "" { pdb = "cdb$root" } - _, err := c.dbCustomQueries.Exec(fmt.Sprintf("alter session set container = %s", pdb)) + _, err := c.dbCustomQueries.Exec("alter session set container = " + pdb) if err != nil { allErrors = concatenateError(allErrors, fmt.Sprintf("failed to set container %s %s", pdb, err)) reconnectOnConnectionError(c, &c.dbCustomQueries, err) @@ -112,7 +113,7 @@ func (c *Check) CustomQueries() error { var metricsFromSingleRow []metricRow var 
tags []string if pdb != "" { - tags = []string{fmt.Sprintf("pdb:%s", pdb)} + tags = []string{"pdb:" + pdb} } cols, err := rows.SliceScan() if err != nil { diff --git a/pkg/collector/corechecks/oracle/init.go b/pkg/collector/corechecks/oracle/init.go index efc8f963de8d0a..c50229cab1fd90 100644 --- a/pkg/collector/corechecks/oracle/init.go +++ b/pkg/collector/corechecks/oracle/init.go @@ -9,8 +9,10 @@ package oracle import ( "database/sql" + "errors" "fmt" "regexp" + "strconv" "strings" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -30,7 +32,7 @@ func (c *Check) init() error { copy(tags, c.configTags) if c.db == nil { - return fmt.Errorf("database connection not initialized") + return errors.New("database connection not initialized") } var i vInstance @@ -61,9 +63,9 @@ func (c *Check) init() error { } if i.HostName.Valid { - tags = append(tags, fmt.Sprintf("real_hostname:%s", i.HostName.String)) + tags = append(tags, "real_hostname:"+i.HostName.String) } - tags = append(tags, fmt.Sprintf("oracle_version:%s", c.dbVersion)) + tags = append(tags, "oracle_version:"+c.dbVersion) var d vDatabase if isDbVersionGreaterOrEqualThan(c, minMultitenantVersion) { @@ -76,7 +78,7 @@ func (c *Check) init() error { return fmt.Errorf("%s failed to query v$database: %w", c.logPrompt, err) } c.cdbName = d.Name - tags = append(tags, fmt.Sprintf("cdb:%s", c.cdbName)) + tags = append(tags, "cdb:"+c.cdbName) if c.config.ReportedHostname != "" { c.dbResolvedHostname = c.config.ReportedHostname @@ -92,7 +94,7 @@ func (c *Check) init() error { c.dbInstanceIdentifier = c.createDatabaseIdentifier() tags = append(tags, "database_instance:"+c.dbInstanceIdentifier) - tags = append(tags, fmt.Sprintf("dd.internal.resource:database_instance:%s", c.dbHostname)) + tags = append(tags, "dd.internal.resource:database_instance:"+c.dbHostname) isMultitenant := true if d.Cdb == "NO" { isMultitenant = false @@ -198,7 +200,7 @@ func (c *Check) createDatabaseIdentifier() string { } tags["resolved_hostname"] 
= c.dbResolvedHostname tags["server"] = c.config.Server - tags["port"] = fmt.Sprintf("%d", c.config.Port) + tags["port"] = strconv.Itoa(c.config.Port) tags["cdb_name"] = c.cdbName tags["service_name"] = c.config.ServiceName diff --git a/pkg/collector/corechecks/oracle/oracle.go b/pkg/collector/corechecks/oracle/oracle.go index 4c6a66e32dca72..0f139fce3e6df4 100644 --- a/pkg/collector/corechecks/oracle/oracle.go +++ b/pkg/collector/corechecks/oracle/oracle.go @@ -174,7 +174,7 @@ func (c *Check) Run() error { } if db == nil { c.Teardown() - handleServiceCheck(c, fmt.Errorf("empty connection")) + handleServiceCheck(c, errors.New("empty connection")) return fmt.Errorf("%s empty connection", c.logPrompt) } c.db = db @@ -221,7 +221,7 @@ func (c *Check) Run() error { if errConnect != nil { handleServiceCheck(c, errConnect) } else if db == nil { - handleServiceCheck(c, fmt.Errorf("empty connection")) + handleServiceCheck(c, errors.New("empty connection")) } else { handleServiceCheck(c, nil) } @@ -401,19 +401,19 @@ func (c *Check) Configure(senderManager sender.SenderManager, integrationConfigD tags := make([]string, len(c.config.Tags)) copy(tags, c.config.Tags) - tags = append(tags, fmt.Sprintf("dbms:%s", common.IntegrationName), fmt.Sprintf("ddagentversion:%s", c.agentVersion)) + tags = append(tags, "dbms:"+common.IntegrationName, "ddagentversion:"+c.agentVersion) tags = append(tags, fmt.Sprintf("dbm:%t", c.dbmEnabled)) if c.config.TnsAlias != "" { - tags = append(tags, fmt.Sprintf("tns-alias:%s", c.config.TnsAlias)) + tags = append(tags, "tns-alias:"+c.config.TnsAlias) } if c.config.Port != 0 { tags = append(tags, fmt.Sprintf("port:%d", c.config.Port)) } if c.config.Server != "" { - tags = append(tags, fmt.Sprintf("server:%s", c.config.Server)) + tags = append(tags, "server:"+c.config.Server) } if c.config.ServiceName != "" { - tags = append(tags, fmt.Sprintf("service_name:%s", c.config.ServiceName)) + tags = append(tags, "service_name:"+c.config.ServiceName) } 
c.logPrompt = config.GetLogPrompt(c.config.InstanceConfig) @@ -424,7 +424,7 @@ func (c *Check) Configure(senderManager sender.SenderManager, integrationConfigD } else { log.Errorf("%s failed to retrieve agent hostname: %s", c.logPrompt, err) } - tags = append(tags, fmt.Sprintf("ddagenthostname:%s", c.agentHostname)) + tags = append(tags, "ddagenthostname:"+c.agentHostname) c.configTags = make([]string, len(tags)) copy(c.configTags, tags) diff --git a/pkg/collector/corechecks/oracle/os_stats.go b/pkg/collector/corechecks/oracle/os_stats.go index 35050614db6ae0..61425819a2d8e3 100644 --- a/pkg/collector/corechecks/oracle/os_stats.go +++ b/pkg/collector/corechecks/oracle/os_stats.go @@ -59,7 +59,7 @@ func (c *Check) OS_Stats() error { var cpuCount float64 if !numCPUsFound { if err := c.db.Get(&cpuCount, "SELECT value FROM v$parameter WHERE name = 'cpu_count'"); err == nil { - sendMetricWithDefaultTags(c, gauge, fmt.Sprintf("%s.num_cpus", common.IntegrationName), cpuCount) + sendMetricWithDefaultTags(c, gauge, common.IntegrationName+".num_cpus", cpuCount) } else { log.Errorf("%s failed to get cpu_count: %s", c.logPrompt, err) } diff --git a/pkg/collector/corechecks/oracle/processes.go b/pkg/collector/corechecks/oracle/processes.go index 94c03cc148cca4..78393de5f7b50a 100644 --- a/pkg/collector/corechecks/oracle/processes.go +++ b/pkg/collector/corechecks/oracle/processes.go @@ -125,12 +125,12 @@ func (c *Check) ProcessMemory() error { tags = append(tags, "osuser:"+r.OsUser.String) } if c.config.ProcessMemory.Enabled { - sendMetric(c, gauge, fmt.Sprintf("%s.process.pga_used_memory", common.IntegrationName), r.PGAUsedMem, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.process.pga_allocated_memory", common.IntegrationName), r.PGAAllocMem, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.process.pga_freeable_memory", common.IntegrationName), r.PGAFreeableMem, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.process.pga_max_memory", common.IntegrationName), r.PGAMaxMem, tags) + 
sendMetric(c, gauge, common.IntegrationName+".process.pga_used_memory", r.PGAUsedMem, tags) + sendMetric(c, gauge, common.IntegrationName+".process.pga_allocated_memory", r.PGAAllocMem, tags) + sendMetric(c, gauge, common.IntegrationName+".process.pga_freeable_memory", r.PGAFreeableMem, tags) + sendMetric(c, gauge, common.IntegrationName+".process.pga_max_memory", r.PGAMaxMem, tags) // we send pga_maximum_memory for backward compatibility with the old Oracle integration - sendMetric(c, gauge, fmt.Sprintf("%s.process.pga_maximum_memory", common.IntegrationName), r.PGAMaxMem, tags) + sendMetric(c, gauge, common.IntegrationName+".process.pga_maximum_memory", r.PGAMaxMem, tags) } if c.config.InactiveSessions.Enabled && r.Status.Valid && r.Status.String == "INACTIVE" && r.LastCallEt.Valid { @@ -140,7 +140,7 @@ func (c *Check) ProcessMemory() error { if r.ClientInfo.Valid { tags = append(tags, "client_info:"+r.ClientInfo.String) } - sendMetric(c, gauge, fmt.Sprintf("%s.session.inactive_seconds", common.IntegrationName), float64(r.LastCallEt.Int64), tags) + sendMetric(c, gauge, common.IntegrationName+".session.inactive_seconds", float64(r.LastCallEt.Int64), tags) } } diff --git a/pkg/collector/corechecks/oracle/resource_manager.go b/pkg/collector/corechecks/oracle/resource_manager.go index 6ee16c7d4ab663..27ba0de4b61bf7 100644 --- a/pkg/collector/corechecks/oracle/resource_manager.go +++ b/pkg/collector/corechecks/oracle/resource_manager.go @@ -58,8 +58,8 @@ func (c *Check) resourceManager() error { if r.PlanName.Valid && r.PlanName.String != "" { tags = append(tags, "plan_name:"+r.PlanName.String) } - sendMetric(c, gauge, fmt.Sprintf("%s.resource_manager.cpu_consumed_time", common.IntegrationName), r.CPUConsumedTime, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.resource_manager.cpu_wait_time", common.IntegrationName), r.CPUWaitTime, tags) + sendMetric(c, gauge, common.IntegrationName+".resource_manager.cpu_consumed_time", r.CPUConsumedTime, tags) + sendMetric(c, gauge, 
common.IntegrationName+".resource_manager.cpu_wait_time", r.CPUWaitTime, tags) } sender.Commit() return nil diff --git a/pkg/collector/corechecks/oracle/sender_util.go b/pkg/collector/corechecks/oracle/sender_util.go index 35e173caa6c193..c476f7bcf2d38b 100644 --- a/pkg/collector/corechecks/oracle/sender_util.go +++ b/pkg/collector/corechecks/oracle/sender_util.go @@ -8,6 +8,7 @@ package oracle import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" @@ -31,7 +32,7 @@ type metricSender func(string, float64, string, []string) func getMetricFunction(sender sender.Sender, method metricType) (metricSender, error) { if sender == nil { - return nil, fmt.Errorf("sender is nil") + return nil, errors.New("sender is nil") } methods := map[metricType]metricSender{ gauge: sender.Gauge, diff --git a/pkg/collector/corechecks/oracle/shared_memory.go b/pkg/collector/corechecks/oracle/shared_memory.go index 45e8fdf828af4a..5d812164eea0e2 100644 --- a/pkg/collector/corechecks/oracle/shared_memory.go +++ b/pkg/collector/corechecks/oracle/shared_memory.go @@ -57,8 +57,8 @@ func (c *Check) SharedMemory() error { memoryTag := strings.ReplaceAll(r.Memory, " ", "_") memoryTag = strings.ToLower(memoryTag) memoryTag = strings.ReplaceAll(memoryTag, "_size", "") - tags = append(tags, fmt.Sprintf("memory:%s", memoryTag)) - sendMetric(c, gauge, fmt.Sprintf("%s.shared_memory.size", common.IntegrationName), r.Size, tags) + tags = append(tags, "memory:"+memoryTag) + sendMetric(c, gauge, common.IntegrationName+".shared_memory.size", r.Size, tags) } sender.Commit() return nil diff --git a/pkg/collector/corechecks/oracle/statements.go b/pkg/collector/corechecks/oracle/statements.go index 360da5b8e0553b..37348daf0b4d23 100644 --- a/pkg/collector/corechecks/oracle/statements.go +++ b/pkg/collector/corechecks/oracle/statements.go @@ -799,7 +799,7 @@ func (c *Check) StatementMetrics() (int, error) { Statement: SQLStatement, Metadata: planStatementMetadata, } - tags := 
strings.Join(append(c.tags, fmt.Sprintf("pdb:%s", statementMetricRow.PDBName)), ",") + tags := strings.Join(append(c.tags, "pdb:"+statementMetricRow.PDBName), ",") planPayload := PlanPayload{ Timestamp: float64(time.Now().UnixMilli()), diff --git a/pkg/collector/corechecks/oracle/tablespaces.go b/pkg/collector/corechecks/oracle/tablespaces.go index 1dd30103a19429..e812a05e229be1 100644 --- a/pkg/collector/corechecks/oracle/tablespaces.go +++ b/pkg/collector/corechecks/oracle/tablespaces.go @@ -104,10 +104,10 @@ func (c *Check) Tablespaces() error { for _, r := range rows { tags := appendPDBTag(c.tags, r.PdbName) tags = append(tags, "tablespace:"+r.TablespaceName) - sendMetric(c, gauge, fmt.Sprintf("%s.tablespace.used", common.IntegrationName), r.Used, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.tablespace.size", common.IntegrationName), r.Size, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.tablespace.in_use", common.IntegrationName), r.InUse, tags) - sendMetric(c, gauge, fmt.Sprintf("%s.tablespace.offline", common.IntegrationName), r.Offline, tags) + sendMetric(c, gauge, common.IntegrationName+".tablespace.used", r.Used, tags) + sendMetric(c, gauge, common.IntegrationName+".tablespace.size", r.Size, tags) + sendMetric(c, gauge, common.IntegrationName+".tablespace.in_use", r.InUse, tags) + sendMetric(c, gauge, common.IntegrationName+".tablespace.offline", r.Offline, tags) } rowsMaxSize := []rowMaxSizeDB{} @@ -119,7 +119,7 @@ func (c *Check) Tablespaces() error { for _, r := range rowsMaxSize { tags := appendPDBTag(c.tags, r.PdbName) tags = append(tags, "tablespace:"+r.TablespaceName) - sendMetric(c, gauge, fmt.Sprintf("%s.tablespace.maxsize", common.IntegrationName), r.MaxSize, tags) + sendMetric(c, gauge, common.IntegrationName+".tablespace.maxsize", r.MaxSize, tags) } sender.Commit() diff --git a/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go b/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go index dc36f42e7d4bd2..8d2d36d8ccc422 100644 --- 
a/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go +++ b/pkg/collector/corechecks/orchestrator/ecs/ecs_test.go @@ -8,7 +8,7 @@ package ecs import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/require" @@ -48,7 +48,7 @@ func (store *fakeWorkloadmetaStore) GetContainer(id string) (*workloadmeta.Conta if id == "938f6d263c464aa5985dc67ab7f38a7e-1714341084" { return container2(store.EnableV4), nil } - return nil, fmt.Errorf("container not found") + return nil, errors.New("container not found") } type fakeSender struct { @@ -171,10 +171,10 @@ func task(v4 bool, id string) *workloadmeta.ECSTask { ecsTask := &workloadmeta.ECSTask{ EntityID: workloadmeta.EntityID{ Kind: workloadmeta.KindECSTask, - ID: fmt.Sprintf("arn:aws:ecs:us-east-1:123456789012:task/%s", id), + ID: "arn:aws:ecs:us-east-1:123456789012:task/" + id, }, EntityMeta: workloadmeta.EntityMeta{ - Name: fmt.Sprintf("12345678-1234-1234-1234-123456789%s", id), + Name: "12345678-1234-1234-1234-123456789" + id, }, ClusterName: "ecs-cluster", LaunchType: workloadmeta.ECSLaunchTypeEC2, @@ -291,7 +291,7 @@ func expected(v4 bool, groupID int32, ids ...string) *process.CollectorECSTask { } newTask := &process.ECSTask{ - Arn: fmt.Sprintf("arn:aws:ecs:us-east-1:123456789012:task/%s", id), + Arn: "arn:aws:ecs:us-east-1:123456789012:task/" + id, LaunchType: "ec2", Family: "redis", Version: "1", diff --git a/pkg/collector/corechecks/orchestrator/kubeletconfig/kubeletconfig.go b/pkg/collector/corechecks/orchestrator/kubeletconfig/kubeletconfig.go index 72b6b63138d326..e697d7f3045a02 100644 --- a/pkg/collector/corechecks/orchestrator/kubeletconfig/kubeletconfig.go +++ b/pkg/collector/corechecks/orchestrator/kubeletconfig/kubeletconfig.go @@ -11,6 +11,7 @@ import ( "context" "errors" "fmt" + "strconv" "time" "github.com/twmb/murmur3" @@ -178,7 +179,7 @@ func (c *Check) Run() error { return errors.New("kubelet config not found in workloadmeta store") } - rv := fmt.Sprint(murmur3.Sum64(rawKubeletConfig)) + rv := 
strconv.FormatUint(murmur3.Sum64(rawKubeletConfig), 10) tags := []string{} diff --git a/pkg/collector/corechecks/orchestrator/pod/pod.go b/pkg/collector/corechecks/orchestrator/pod/pod.go index cc43a94c284bd9..ca0b81fd4a20c6 100644 --- a/pkg/collector/corechecks/orchestrator/pod/pod.go +++ b/pkg/collector/corechecks/orchestrator/pod/pod.go @@ -10,7 +10,6 @@ package pod import ( "context" "errors" - "fmt" "github.com/benbjohnson/clock" "go.uber.org/atomic" @@ -195,7 +194,7 @@ func (c *Check) Run() error { processResult, listed, processed := c.processor.Process(ctx, podList) if processed == -1 { - return fmt.Errorf("unable to process pods: a panic occurred") + return errors.New("unable to process pods: a panic occurred") } orchestrator.SetCacheStats(listed, processed, ctx.NodeType) diff --git a/pkg/collector/corechecks/servicediscovery/envs/envs_testutils.go b/pkg/collector/corechecks/servicediscovery/envs/envs_testutils.go index ead8e3d10e3e67..6fbf55174e2b7a 100644 --- a/pkg/collector/corechecks/servicediscovery/envs/envs_testutils.go +++ b/pkg/collector/corechecks/servicediscovery/envs/envs_testutils.go @@ -7,8 +7,6 @@ package envs -import "fmt" - // NewVariables returns a new [Variables] // for unit tests to verify that the input map has only target variables. 
func NewVariables(vars map[string]string) Variables { @@ -27,7 +25,7 @@ func GetExpectedEnvs() []string { expectedEnvs := make([]string, 0, len(targets)) for env := range targets { - expectedEnvs = append(expectedEnvs, fmt.Sprintf("%s=true", env)) + expectedEnvs = append(expectedEnvs, env+"=true") } return expectedEnvs } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 932a33f2ba8674..1f1354cec9a22d 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -13,13 +13,13 @@ import ( "bytes" "context" "encoding/json" - "fmt" "net" "net/http" "net/http/httptest" "os" "os/exec" "path/filepath" + "strconv" "strings" "syscall" "testing" @@ -240,7 +240,7 @@ func addSockets[P procfs.NetTCP | procfs.NetUDP](sockMap map[uint64]socketInfo, } func getNsInfoOld(pid int) (*namespaceInfo, error) { - path := kernel.HostProc(fmt.Sprintf("%d", pid)) + path := kernel.HostProc(strconv.Itoa(pid)) proc, err := procfs.NewFS(path) if err != nil { return nil, err diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_services_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_services_test.go index 3c0fdadc3084ab..4eceef7b936221 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_services_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_services_test.go @@ -11,7 +11,6 @@ package module import ( "context" - "fmt" "net" "os" "os/exec" @@ -512,7 +511,7 @@ func TestServicesAPMInstrumentationProvidedWithMaps(t *testing.T) { "..", "..", "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace", - fmt.Sprintf("libssl.so.%s", runtime.GOARCH)), + "libssl.so."+runtime.GOARCH), language: language.Python, }, { diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go 
b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go index 1e39f7cd5841a7..6c15e943c79b53 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go @@ -8,6 +8,7 @@ package checkconfig import ( "context" + "errors" "fmt" "hash/fnv" "net" @@ -302,11 +303,11 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data c.Network = instance.Network if c.IPAddress == "" && c.Network == "" { - return nil, fmt.Errorf("`ip_address` or `network` config must be provided") + return nil, errors.New("`ip_address` or `network` config must be provided") } if c.IPAddress != "" && c.Network != "" { - return nil, fmt.Errorf("`ip_address` and `network` cannot be used at the same time") + return nil, errors.New("`ip_address` and `network` cannot be used at the same time") } if c.Network != "" { _, _, err = net.ParseCIDR(c.Network) @@ -453,7 +454,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data if useRCProfiles { if rcClient == nil { - return nil, fmt.Errorf("rc client not initialized, cannot use rc profiles") + return nil, errors.New("rc client not initialized, cannot use rc profiles") } if len(initConfig.Profiles) > 0 { // We don't support merging inline profiles with profiles fetched via remote @@ -475,7 +476,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data } if haveLegacyProfile || profiledefinition.IsLegacyMetrics(instance.Metrics) { if initConfig.Loader == "" && instance.Loader == "" { - return nil, fmt.Errorf("legacy profile detected with no loader specified, falling back to the Python loader") + return nil, errors.New("legacy profile detected with no loader specified, falling back to the Python loader") } } } @@ -572,16 +573,16 @@ func (c *CheckConfig) getResolvedSubnetName() string { func (c *CheckConfig) DeviceDigest(address string) DeviceDigest { h := fnv.New64() // Hash write never 
returns an error - h.Write([]byte(address)) //nolint:errcheck - h.Write([]byte(fmt.Sprintf("%d", c.Port))) //nolint:errcheck - h.Write([]byte(c.SnmpVersion)) //nolint:errcheck - h.Write([]byte(c.CommunityString)) //nolint:errcheck - h.Write([]byte(c.User)) //nolint:errcheck - h.Write([]byte(c.AuthKey)) //nolint:errcheck - h.Write([]byte(c.AuthProtocol)) //nolint:errcheck - h.Write([]byte(c.PrivKey)) //nolint:errcheck - h.Write([]byte(c.PrivProtocol)) //nolint:errcheck - h.Write([]byte(c.ContextName)) //nolint:errcheck + h.Write([]byte(address)) //nolint:errcheck + h.Write([]byte(strconv.FormatUint(uint64(c.Port), 10))) //nolint:errcheck + h.Write([]byte(c.SnmpVersion)) //nolint:errcheck + h.Write([]byte(c.CommunityString)) //nolint:errcheck + h.Write([]byte(c.User)) //nolint:errcheck + h.Write([]byte(c.AuthKey)) //nolint:errcheck + h.Write([]byte(c.AuthProtocol)) //nolint:errcheck + h.Write([]byte(c.PrivKey)) //nolint:errcheck + h.Write([]byte(c.PrivProtocol)) //nolint:errcheck + h.Write([]byte(c.ContextName)) //nolint:errcheck // Sort the addresses to get a stable digest addresses := make([]string, 0, len(c.IgnoredIPAddresses)) diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go index 7b29a735f49fd1..de44888cd1f3f4 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go @@ -7,6 +7,7 @@ package checkconfig import ( "encoding/json" + "errors" "fmt" "path/filepath" "regexp" @@ -2164,7 +2165,7 @@ func (m *mockRCClient) Subscribe(product data.Product, fn func(update map[string return } if m.subscribed { - m.err = fmt.Errorf("double subscription to ProductNDMDeviceProfilesCustom") + m.err = errors.New("double subscription to ProductNDMDeviceProfilesCustom") return } m.subscribed = true diff --git a/pkg/collector/corechecks/snmp/internal/common/oidtrie.go 
b/pkg/collector/corechecks/snmp/internal/common/oidtrie.go index 7974d2c98b18c8..c9775c8c2a1583 100644 --- a/pkg/collector/corechecks/snmp/internal/common/oidtrie.go +++ b/pkg/collector/corechecks/snmp/internal/common/oidtrie.go @@ -6,6 +6,7 @@ package common import ( + "errors" "fmt" "strconv" "strings" @@ -67,7 +68,7 @@ func oidToNumbers(oid string) ([]int, error) { func (o *OIDTrie) getNode(oid string) (*OIDTrie, error) { if oid == "" { - return nil, fmt.Errorf("invalid empty OID") + return nil, errors.New("invalid empty OID") } current := o oid = strings.TrimLeft(oid, ".") diff --git a/pkg/collector/corechecks/snmp/internal/common/utils_test.go b/pkg/collector/corechecks/snmp/internal/common/utils_test.go index 11281cf6ce268e..624d3c476988f9 100644 --- a/pkg/collector/corechecks/snmp/internal/common/utils_test.go +++ b/pkg/collector/corechecks/snmp/internal/common/utils_test.go @@ -6,7 +6,7 @@ package common import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -80,14 +80,14 @@ func Test_makeStringBatches(t *testing.T) { []string{"aa", "bb", "cc", "dd", "ee"}, 0, nil, - fmt.Errorf("batch size must be positive. invalid size: 0"), + errors.New("batch size must be positive. invalid size: 0"), }, { "negative batch size", []string{"aa", "bb", "cc", "dd", "ee"}, -1, nil, - fmt.Errorf("batch size must be positive. invalid size: -1"), + errors.New("batch size must be positive. 
invalid size: -1"), }, } for _, tt := range tests { diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index df1b71290b5437..8fdf1be2bc1aec 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -8,7 +8,6 @@ package devicecheck import ( "encoding/json" "errors" - "fmt" "strings" "testing" "time" @@ -612,7 +611,7 @@ profiles: sender.AssertMetric(t, "Gauge", deviceUnreachableMetric, 0., "", snmpTags) sender.ResetCalls() - sess.ConnectErr = fmt.Errorf("some error") + sess.ConnectErr = errors.New("some error") err = deviceCk.Run(time.Now()) assert.Error(t, err, "some error") @@ -624,7 +623,7 @@ profiles: func TestRun_sessionCloseError(t *testing.T) { profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() - sess.CloseErr = fmt.Errorf("close error") + sess.CloseErr = errors.New("close error") sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { return sess, nil } diff --git a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go index e5c4a09455cf8e..76e4318b61d68f 100644 --- a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go +++ b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go @@ -6,6 +6,7 @@ package discovery import ( + "errors" "fmt" "net" "testing" @@ -254,7 +255,7 @@ func TestDiscovery_checkDevice(t *testing.T) { // session configuration error discovery.sessionFactory = func(*checkconfig.CheckConfig) (session.Session, error) { - return nil, fmt.Errorf("some error") + return nil, errors.New("some error") } err = discovery.checkDevice(job) @@ -264,7 +265,7 @@ func TestDiscovery_checkDevice(t *testing.T) { // Test session.Connect() error checkDeviceOnce() - sess.ConnectErr = fmt.Errorf("connection 
error") + sess.ConnectErr = errors.New("connection error") err = discovery.checkDevice(job) assert.Nil(t, err) assert.Equal(t, 0, len(discovery.discoveredDevices)) @@ -276,7 +277,7 @@ func TestDiscovery_checkDevice(t *testing.T) { return sess, nil } var nilPacket *gosnmp.SnmpPacket - sess.On("Get", []string{"1.3.6.1.2.1.1.2.0"}).Return(nilPacket, fmt.Errorf("get error")) + sess.On("Get", []string{"1.3.6.1.2.1.1.2.0"}).Return(nilPacket, errors.New("get error")) err = discovery.checkDevice(job) // check device with Get error assert.Nil(t, err) assert.Equal(t, 0, len(discovery.discoveredDevices)) diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go index e4d0a0eed25631..a9fc088ed4f159 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go @@ -106,7 +106,7 @@ func fetchColumnOids(sess session.Session, oids []string, bulkMaxRepetitions uin func getResults(sess session.Session, requestOids []string, bulkMaxRepetitions uint32, fetchStrategy columnFetchStrategy) (*gosnmp.SnmpPacket, error) { if sess.GetVersion() == gosnmp.Version1 && fetchStrategy == useGetBulk { // snmp v1 doesn't support GetBulk - return nil, fmt.Errorf("GetBulk not supported in SNMP v1") + return nil, errors.New("GetBulk not supported in SNMP v1") } var results *gosnmp.SnmpPacket diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go index 4647f291869e43..f39255b708e5e9 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go @@ -9,7 +9,6 @@ import ( "bufio" "bytes" "errors" - "fmt" "slices" "strings" "testing" @@ -372,8 +371,8 @@ func Test_fetchColumnOidsBatch_usingGetBulkAndGetNextFallback(t *testing.T) { }, } - sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, 
checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetBulk", []string{"1.1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) + sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetBulk", []string{"1.1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) // First batch sess.On("GetNext", []string{"1.1.1", "1.1.2"}).Return(&bulkPacket, nil) @@ -578,8 +577,8 @@ func Test_fetchOidBatchSize_zeroSizeError(t *testing.T) { func Test_fetchOidBatchSize_fetchError(t *testing.T) { sess := session.CreateMockSession() - sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("my error")) - sess.On("Get", []string{"1.1.1.1.0"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("my error")) + sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0"}).Return(&gosnmp.SnmpPacket{}, errors.New("my error")) + sess.On("Get", []string{"1.1.1.1.0"}).Return(&gosnmp.SnmpPacket{}, errors.New("my error")) oids := []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0", "1.1.1.4.0", "1.1.1.5.0", "1.1.1.6.0"} batchSizeOptimizer := newOidBatchSizeOptimizer(snmpGet, 2) @@ -833,14 +832,14 @@ func Test_fetchValues_errors(t *testing.T) { name: "invalid batch size", maxReps: checkconfig.DefaultBulkMaxRepetitions, ScalarOIDs: []string{"1.1", "1.2"}, - expectedError: fmt.Errorf("failed to fetch scalar oids with batching: failed to create oid batches: batch size must be positive. invalid size: 0"), + expectedError: errors.New("failed to fetch scalar oids with batching: failed to create oid batches: batch size must be positive. 
invalid size: 0"), }, { name: "get fetch error", maxReps: checkconfig.DefaultBulkMaxRepetitions, batchSize: 10, ScalarOIDs: []string{"1.1", "2.2"}, - expectedError: fmt.Errorf("failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: failed getting oids `[1.1]` using Get: get error"), + expectedError: errors.New("failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: failed getting oids `[1.1]` using Get: get error"), }, { name: "bulk fetch error", @@ -848,18 +847,18 @@ func Test_fetchValues_errors(t *testing.T) { batchSize: 10, ScalarOIDs: []string{}, ColumnOIDs: []string{"1.1", "2.2"}, - expectedError: fmt.Errorf("failed to fetch oids with GetNext batching: failed to fetch column oids: fetch column: failed getting oids `[1.1]` using GetNext: getnext error"), + expectedError: errors.New("failed to fetch oids with GetNext batching: failed to fetch column oids: fetch column: failed getting oids `[1.1]` using GetNext: getnext error"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sess := session.CreateMockSession() - sess.On("Get", []string{"1.1", "2.2"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("get error")) - sess.On("Get", []string{"1.1"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("get error")) - sess.On("GetBulk", []string{"1.1", "2.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetBulk", []string{"1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetNext", []string{"1.1", "2.2"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("getnext error")) - sess.On("GetNext", []string{"1.1"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("getnext error")) + sess.On("Get", []string{"1.1", "2.2"}).Return(&gosnmp.SnmpPacket{}, errors.New("get error")) + sess.On("Get", []string{"1.1"}).Return(&gosnmp.SnmpPacket{}, errors.New("get error")) + sess.On("GetBulk", []string{"1.1", "2.2"}, 
checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetBulk", []string{"1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetNext", []string{"1.1", "2.2"}).Return(&gosnmp.SnmpPacket{}, errors.New("getnext error")) + sess.On("GetNext", []string{"1.1"}).Return(&gosnmp.SnmpPacket{}, errors.New("getnext error")) batchSizeOptimizers := NewOidBatchSizeOptimizers(tt.batchSize) @@ -1100,11 +1099,11 @@ func Test_batchSizeOptimizers(t *testing.T) { }, } - sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0", "1.1.1.4.0"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("my error")) + sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0", "1.1.1.4.0"}).Return(&gosnmp.SnmpPacket{}, errors.New("my error")) sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0"}).Return(&scalarPacket1, nil) sess.On("Get", []string{"1.1.1.3.0", "1.1.1.4.0"}).Return(&scalarPacket2, nil) - sess.On("GetBulk", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) + sess.On("GetBulk", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket1, nil) sess.On("GetBulk", []string{"1.1.3", "1.1.4"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket2, nil) @@ -1196,7 +1195,7 @@ func Test_batchSizeOptimizers(t *testing.T) { Variables: []gosnmp.SnmpPDU{scalarVariable4}, } - sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0", "1.1.1.4.0"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("my error")) + sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0", "1.1.1.4.0"}).Return(&gosnmp.SnmpPacket{}, errors.New("my error")) sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0"}).Return(&scalarPacket1, 
nil) sess.On("Get", []string{"1.1.1.4.0"}).Return(&scalarPacket2, nil) @@ -1253,7 +1252,7 @@ func Test_batchSizeOptimizers(t *testing.T) { Variables: []gosnmp.SnmpPDU{scalarVariable4}, } - sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("my error")) + sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0"}).Return(&gosnmp.SnmpPacket{}, errors.New("my error")) sess.On("Get", []string{"1.1.1.1.0"}).Return(&scalarPacket1, nil) sess.On("Get", []string{"1.1.1.2.0"}).Return(&scalarPacket2, nil) sess.On("Get", []string{"1.1.1.3.0"}).Return(&scalarPacket3, nil) @@ -1305,9 +1304,9 @@ func Test_batchSizeOptimizers(t *testing.T) { }, } - sess.On("GetBulk", []string{"1.1.1", "1.1.2", "1.1.3"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetBulk", []string{"1.1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) + sess.On("GetBulk", []string{"1.1.1", "1.1.2", "1.1.3"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetBulk", []string{"1.1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) sess.On("GetNext", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}).Return(&nextPacket1, nil) sess.On("GetNext", []string{"1.1.1.1", "1.1.2.1", "1.1.3.1", "1.1.4.1"}).Return(&gosnmp.SnmpPacket{}, nil) @@ -1394,13 +1393,13 @@ func Test_batchSizeOptimizers(t *testing.T) { sess.On("Get", []string{"1.1.1.1.0", "1.1.1.2.0", "1.1.1.3.0", "1.1.1.4.0"}).Return(&scalarPacket1, nil) - sess.On("GetBulk", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}, 
checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) - sess.On("GetBulk", []string{"1.1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("bulk error")) + sess.On("GetBulk", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetBulk", []string{"1.1.1", "1.1.2"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) + sess.On("GetBulk", []string{"1.1.1"}, checkconfig.DefaultBulkMaxRepetitions).Return(&gosnmp.SnmpPacket{}, errors.New("bulk error")) - sess.On("GetNext", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("next error")) - sess.On("GetNext", []string{"1.1.1", "1.1.2"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("next error")) - sess.On("GetNext", []string{"1.1.1"}).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("next error")) + sess.On("GetNext", []string{"1.1.1", "1.1.2", "1.1.3", "1.1.4"}).Return(&gosnmp.SnmpPacket{}, errors.New("next error")) + sess.On("GetNext", []string{"1.1.1", "1.1.2"}).Return(&gosnmp.SnmpPacket{}, errors.New("next error")) + sess.On("GetNext", []string{"1.1.1"}).Return(&gosnmp.SnmpPacket{}, errors.New("next error")) return sess }, diff --git a/pkg/collector/corechecks/snmp/internal/profile/rc_provider.go b/pkg/collector/corechecks/snmp/internal/profile/rc_provider.go index a94df0114ac5a7..6491b646d0d7ab 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/rc_provider.go +++ b/pkg/collector/corechecks/snmp/internal/profile/rc_provider.go @@ -7,6 +7,7 @@ package profile import ( "encoding/json" + "errors" "fmt" "maps" "slices" @@ -48,7 +49,7 @@ func buildAndSubscribeRCProvider(rcClient rcclient.Component) (*UpdatableProvide // Load OOTB profiles 
from YAML defaultProfiles := getYamlDefaultProfiles() if defaultProfiles == nil { - return nil, fmt.Errorf("could not find OOTB profiles") + return nil, errors.New("could not find OOTB profiles") } userProfiles := make(ProfileConfigMap) diff --git a/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver.go b/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver.go index 0177083d02740e..5beab8f49805e9 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver.go +++ b/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver.go @@ -6,6 +6,7 @@ package profile import ( + "errors" "fmt" "strconv" "strings" @@ -16,7 +17,7 @@ func getMostSpecificOid(oids []string) (string, error) { var mostSpecificOid string if len(oids) == 0 { - return "", fmt.Errorf("cannot get most specific oid from empty list of oids") + return "", errors.New("cannot get most specific oid from empty list of oids") } for _, oid := range oids { diff --git a/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver_test.go b/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver_test.go index 940d04d7ecd073..e86efb81749c4c 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/sysobjectid_resolver_test.go @@ -6,9 +6,10 @@ package profile import ( - "fmt" - "github.com/stretchr/testify/assert" + "errors" "testing" + + "github.com/stretchr/testify/assert" ) func Test_getMostSpecificOid(t *testing.T) { @@ -28,13 +29,13 @@ func Test_getMostSpecificOid(t *testing.T) { "error on empty oids", []string{}, "", - fmt.Errorf("cannot get most specific oid from empty list of oids"), + errors.New("cannot get most specific oid from empty list of oids"), }, { "error on parsing", []string{"a.1.2.3"}, "", - fmt.Errorf("error parsing part `a` for pattern `a.1.2.3`: strconv.Atoi: parsing \"a\": invalid syntax"), + errors.New("error parsing part `a` for 
pattern `a.1.2.3`: strconv.Atoi: parsing \"a\": invalid syntax"), }, { "most lengthy", diff --git a/pkg/collector/corechecks/snmp/internal/report/report_bandwidth_usage_test.go b/pkg/collector/corechecks/snmp/internal/report/report_bandwidth_usage_test.go index ab15ecfdbd0e42..73b9733841c692 100644 --- a/pkg/collector/corechecks/snmp/internal/report/report_bandwidth_usage_test.go +++ b/pkg/collector/corechecks/snmp/internal/report/report_bandwidth_usage_test.go @@ -6,10 +6,11 @@ package report import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/common" + "errors" "testing" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/common" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -143,7 +144,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("bandwidth usage: missing `ifHighSpeed` metric, skipping metric. fullIndex=9"), + expectedError: errors.New("bandwidth usage: missing `ifHighSpeed` metric, skipping metric. fullIndex=9"), rateMap: interfaceRateMapWithPrevious(), }, { @@ -167,7 +168,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("bandwidth usage: missing `ifHCInOctets` metric, skipping this row. fullIndex=9"), + expectedError: errors.New("bandwidth usage: missing `ifHCInOctets` metric, skipping this row. fullIndex=9"), rateMap: interfaceRateMapWithPrevious(), }, { @@ -191,7 +192,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("bandwidth usage: missing `ifHCOutOctets` metric, skipping this row. fullIndex=9"), + expectedError: errors.New("bandwidth usage: missing `ifHCOutOctets` metric, skipping this row. 
fullIndex=9"), }, { name: "missing ifHCInOctets value", @@ -220,7 +221,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("bandwidth usage: missing value for `ifHCInOctets` metric, skipping this row. fullIndex=9"), + expectedError: errors.New("bandwidth usage: missing value for `ifHCInOctets` metric, skipping this row. fullIndex=9"), }, { name: "missing ifHighSpeed value", @@ -249,7 +250,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("bandwidth usage: missing value for `ifHighSpeed`, skipping this row. fullIndex=9"), + expectedError: errors.New("bandwidth usage: missing value for `ifHighSpeed`, skipping this row. fullIndex=9"), }, { name: "cannot convert ifHighSpeed to float", @@ -278,7 +279,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("failed to convert ifHighSpeedValue to float64: failed to parse `abc`: strconv.ParseFloat: parsing \"abc\": invalid syntax"), + expectedError: errors.New("failed to convert ifHighSpeedValue to float64: failed to parse `abc`: strconv.ParseFloat: parsing \"abc\": invalid syntax"), }, { name: "cannot convert ifHCInOctets to float", @@ -307,7 +308,7 @@ func Test_metricSender_sendBandwidthUsageMetric(t *testing.T) { }, }, expectedMetric: []Metric{}, - expectedError: fmt.Errorf("failed to convert octetsValue to float64: failed to parse `abc`: strconv.ParseFloat: parsing \"abc\": invalid syntax"), + expectedError: errors.New("failed to convert octetsValue to float64: failed to parse `abc`: strconv.ParseFloat: parsing \"abc\": invalid syntax"), }, { name: "[custom speed] snmp.ifBandwidthIn/OutUsage.rate with custom interface speed matched by name", diff --git a/pkg/collector/corechecks/snmp/internal/report/report_memory_usage.go 
b/pkg/collector/corechecks/snmp/internal/report/report_memory_usage.go index f7ef5201e22ed0..69218818ac317c 100644 --- a/pkg/collector/corechecks/snmp/internal/report/report_memory_usage.go +++ b/pkg/collector/corechecks/snmp/internal/report/report_memory_usage.go @@ -6,6 +6,7 @@ package report import ( + "errors" "fmt" "strings" @@ -293,10 +294,10 @@ func (ms *MetricSender) trySendColumnMemoryUsage(columnSamples map[string]map[st func evaluateMemoryUsage(memoryUsed float64, memoryTotal float64) (float64, error) { if memoryTotal == 0 { - return 0, fmt.Errorf("cannot evaluate memory usage, total memory is 0") + return 0, errors.New("cannot evaluate memory usage, total memory is 0") } if memoryUsed < 0 { - return 0, fmt.Errorf("cannot evaluate memory usage, memory used is < 0") + return 0, errors.New("cannot evaluate memory usage, memory used is < 0") } return (memoryUsed / memoryTotal) * 100, nil } diff --git a/pkg/collector/corechecks/snmp/internal/report/report_memory_usage_test.go b/pkg/collector/corechecks/snmp/internal/report/report_memory_usage_test.go index 0a3e589967d75d..42373778fcd564 100644 --- a/pkg/collector/corechecks/snmp/internal/report/report_memory_usage_test.go +++ b/pkg/collector/corechecks/snmp/internal/report/report_memory_usage_test.go @@ -6,7 +6,7 @@ package report import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -84,7 +84,7 @@ func Test_metricSender_sendMemoryUsageMetric(t *testing.T) { }, }}, []Metric{}, - fmt.Errorf("missing free, total memory metrics, skipping scalar memory usage"), + errors.New("missing free, total memory metrics, skipping scalar memory usage"), }, { "should not emit evaluated snmp.memory.usage when only scalar memory.free is collected", @@ -98,7 +98,7 @@ func Test_metricSender_sendMemoryUsageMetric(t *testing.T) { }, }}, []Metric{}, - fmt.Errorf("missing used, total memory metrics, skipping scalar memory usage"), + errors.New("missing used, total memory metrics, skipping scalar memory 
usage"), }, { "should not emit evaluated snmp.memory.usage when only scalar memory.total is collected", @@ -112,7 +112,7 @@ func Test_metricSender_sendMemoryUsageMetric(t *testing.T) { }, }}, []Metric{}, - fmt.Errorf("missing used, free memory metrics, skipping scalar memory usage"), + errors.New("missing used, free memory metrics, skipping scalar memory usage"), }, { "should not emit evaluated snmp.memory.usage when only column memory.used is collected", @@ -135,7 +135,7 @@ func Test_metricSender_sendMemoryUsageMetric(t *testing.T) { }, }}, []Metric{}, - fmt.Errorf("missing free, total memory metrics, skipping column memory usage"), + errors.New("missing free, total memory metrics, skipping column memory usage"), }, { "should not emit evaluated snmp.memory.usage when only column memory.free is collected", @@ -158,7 +158,7 @@ func Test_metricSender_sendMemoryUsageMetric(t *testing.T) { }, }}, []Metric{}, - fmt.Errorf("missing used, total memory metrics, skipping column memory usage"), + errors.New("missing used, total memory metrics, skipping column memory usage"), }, { "should not emit evaluated snmp.memory.usage when only column memory.total is collected", @@ -181,13 +181,13 @@ func Test_metricSender_sendMemoryUsageMetric(t *testing.T) { }, }}, []Metric{}, - fmt.Errorf("missing used, free memory metrics, skipping column memory usage"), + errors.New("missing used, free memory metrics, skipping column memory usage"), }, { "should not emit evaluated snmp.memory.usage when no memory metric is collected", MetricSamplesStore{}, []Metric{}, - fmt.Errorf("missing used, free, total memory metrics, skipping column memory usage"), + errors.New("missing used, free, total memory metrics, skipping column memory usage"), }, { "should emit evaluated snmp.memory.usage when scalar memory.used and memory.total are collected", diff --git a/pkg/collector/corechecks/snmp/internal/session/fake_session.go b/pkg/collector/corechecks/snmp/internal/session/fake_session.go index 
21e687c5c5381e..18ccefafc399d2 100644 --- a/pkg/collector/corechecks/snmp/internal/session/fake_session.go +++ b/pkg/collector/corechecks/snmp/internal/session/fake_session.go @@ -40,7 +40,7 @@ func oidToNumbers(oid string) ([]int, error) { func numbersToOID(nums []int) string { segments := make([]string, len(nums)) for i, k := range nums { - segments[i] = fmt.Sprint(k) + segments[i] = strconv.Itoa(k) } return strings.Join(segments, ".") } diff --git a/pkg/collector/corechecks/snmp/internal/session/session.go b/pkg/collector/corechecks/snmp/internal/session/session.go index 79be26ea143247..1ccbf56b241e8f 100644 --- a/pkg/collector/corechecks/snmp/internal/session/session.go +++ b/pkg/collector/corechecks/snmp/internal/session/session.go @@ -6,6 +6,7 @@ package session import ( + "errors" "fmt" stdlog "log" "strings" @@ -154,7 +155,7 @@ func NewGosnmpSession(config *checkconfig.CheckConfig) (Session, error) { PrivacyPassphrase: config.PrivKey, } } else { - return nil, fmt.Errorf("an authentication method needs to be provided") + return nil, errors.New("an authentication method needs to be provided") } s.gosnmpInst.Target = config.IPAddress diff --git a/pkg/collector/corechecks/snmp/internal/session/session_test.go b/pkg/collector/corechecks/snmp/internal/session/session_test.go index 8810c268a6482c..ecba81c0ce25c9 100644 --- a/pkg/collector/corechecks/snmp/internal/session/session_test.go +++ b/pkg/collector/corechecks/snmp/internal/session/session_test.go @@ -8,7 +8,7 @@ package session import ( "bufio" "bytes" - "fmt" + "errors" "io" stdlog "log" "testing" @@ -42,7 +42,7 @@ func Test_snmpSession_Configure(t *testing.T) { IPAddress: "1.2.3.4", Port: uint16(1234), }, - expectedError: fmt.Errorf("an authentication method needs to be provided"), + expectedError: errors.New("an authentication method needs to be provided"), }, { name: "valid v1 config", @@ -228,7 +228,7 @@ func Test_snmpSession_Configure(t *testing.T) { AuthProtocol: "invalid", }, expectedVersion: 
gosnmp.Version1, // default, not configured - expectedError: fmt.Errorf("unsupported authentication protocol: invalid"), + expectedError: errors.New("unsupported authentication protocol: invalid"), expectedSecurityParameters: nil, // default, not configured }, { @@ -245,7 +245,7 @@ func Test_snmpSession_Configure(t *testing.T) { PrivProtocol: "invalid", }, expectedVersion: gosnmp.Version1, // default, not configured - expectedError: fmt.Errorf("unsupported privacy protocol: invalid"), + expectedError: errors.New("unsupported privacy protocol: invalid"), expectedSecurityParameters: nil, // default, not configured }, { @@ -259,7 +259,7 @@ func Test_snmpSession_Configure(t *testing.T) { OidBatchSize: 100, }, expectedVersion: gosnmp.Version1, - expectedError: fmt.Errorf("config oidBatchSize (100) cannot be higher than gosnmp.MaxOids: 60"), + expectedError: errors.New("config oidBatchSize (100) cannot be higher than gosnmp.MaxOids: 60"), }, } for _, tt := range tests { diff --git a/pkg/collector/corechecks/snmp/internal/valuestore/gosnmp_value_test.go b/pkg/collector/corechecks/snmp/internal/valuestore/gosnmp_value_test.go index 5584e10e26376b..2ec32db70b2ff9 100644 --- a/pkg/collector/corechecks/snmp/internal/valuestore/gosnmp_value_test.go +++ b/pkg/collector/corechecks/snmp/internal/valuestore/gosnmp_value_test.go @@ -6,7 +6,7 @@ package valuestore import ( - "fmt" + "errors" "testing" "github.com/gosnmp/gosnmp" @@ -109,7 +109,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: IPAddress should be string type but got type `` and value ``"), + errors.New("oid .1.2.3: IPAddress should be string type but got type `` and value ``"), }, { "Null", @@ -120,7 +120,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: invalid type: Null"), + errors.New("oid .1.2.3: invalid type: Null"), }, { "Counter32", @@ -208,7 +208,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", 
ResultValue{}, - fmt.Errorf("oid .1.2.3: invalid type: NoSuchObject"), + errors.New("oid .1.2.3: invalid type: NoSuchObject"), }, { "NoSuchInstance", @@ -219,7 +219,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: invalid type: NoSuchInstance"), + errors.New("oid .1.2.3: invalid type: NoSuchInstance"), }, { "gosnmp.OctetString with wrong type", @@ -230,7 +230,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: OctetString/BitString should be []byte type but got type `float64` and value `1`"), + errors.New("oid .1.2.3: OctetString/BitString should be []byte type but got type `float64` and value `1`"), }, { "gosnmp.OpaqueFloat with wrong type", @@ -241,7 +241,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: OpaqueFloat should be float32 type but got type `string` and value `abc`"), + errors.New("oid .1.2.3: OpaqueFloat should be float32 type but got type `string` and value `abc`"), }, { "gosnmp.OpaqueDouble with wrong type", @@ -252,7 +252,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: OpaqueDouble should be float64 type but got type `string` and value `abc`"), + errors.New("oid .1.2.3: OpaqueDouble should be float64 type but got type `string` and value `abc`"), }, { "gosnmp.ObjectIdentifier with wrong type", @@ -263,7 +263,7 @@ func Test_getValueFromPDU(t *testing.T) { }, "1.2.3", ResultValue{}, - fmt.Errorf("oid .1.2.3: ObjectIdentifier should be string type but got type `int` and value `1`"), + errors.New("oid .1.2.3: ObjectIdentifier should be string type but got type `int` and value `1`"), }, } for _, tt := range tests { diff --git a/pkg/collector/corechecks/snmp/snmp_test.go b/pkg/collector/corechecks/snmp/snmp_test.go index 4ea007f4cde4ba..ae1610e26bbf79 100644 --- a/pkg/collector/corechecks/snmp/snmp_test.go +++ b/pkg/collector/corechecks/snmp/snmp_test.go @@ 
-8,6 +8,7 @@ package snmp import ( "bytes" "encoding/json" + "errors" "fmt" "testing" "time" @@ -970,7 +971,7 @@ func TestServiceCheckFailures(t *testing.T) { sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { return sess, nil } - sess.ConnectErr = fmt.Errorf("can't connect") + sess.ConnectErr = errors.New("can't connect") chk := Check{sessionFactory: sessionFactory} // language=yaml @@ -1148,12 +1149,12 @@ func TestCheck_Run(t *testing.T) { }{ { name: "connection error", - sessionConnError: fmt.Errorf("can't connect"), + sessionConnError: errors.New("can't connect"), expectedErr: "snmp connection error: can't connect", }, { name: "failed to fetch sysobjectid", - sysObjectIDError: fmt.Errorf("no sysobjectid"), + sysObjectIDError: errors.New("no sysobjectid"), valuesPacket: valuesPacketUptime, reachableValuesPacket: gosnmplib.MockValidReachableGetNextPacket, expectedErr: "failed to autodetect profile: failed to fetch sysobjectid: cannot get sysobjectid: no sysobjectid", @@ -1193,23 +1194,23 @@ func TestCheck_Run(t *testing.T) { reachableValuesPacket: gosnmplib.MockValidReachableGetNextPacket, sysObjectIDPacket: sysObjectIDPacketOkMock, valuesPacket: valuesPacketErrMock, - valuesError: fmt.Errorf("no value"), + valuesError: errors.New("no value"), expectedErr: "failed to fetch values: failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: error getting oids `[1.2.3.4.5 1.3.6.1.2.1.1.3.0 1.3.6.1.2.1.1.5.0 1.3.6.1.4.1.3375.2.1.1.2.1.44.0 1.3.6.1.4.1.3375.2.1.1.2.1.44.999]`: no value", }, { name: "failed to fetch sysobjectid and failed to fetch values", reachableValuesPacket: gosnmplib.MockValidReachableGetNextPacket, - sysObjectIDError: fmt.Errorf("no sysobjectid"), + sysObjectIDError: errors.New("no sysobjectid"), valuesPacket: valuesPacketErrMock, - valuesError: fmt.Errorf("no value"), + valuesError: errors.New("no value"), expectedErr: "failed to autodetect profile: failed to fetch sysobjectid: cannot get 
sysobjectid: no sysobjectid; failed to fetch values: failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: error getting oids `[1.3.6.1.2.1.1.3.0]`: no value", }, { name: "failed reachability check", - sysObjectIDError: fmt.Errorf("no sysobjectid"), - reachableGetNextError: fmt.Errorf("no value for GextNext"), + sysObjectIDError: errors.New("no sysobjectid"), + reachableGetNextError: errors.New("no value for GextNext"), valuesPacket: valuesPacketErrMock, - valuesError: fmt.Errorf("no value"), + valuesError: errors.New("no value"), expectedErr: "check device reachable: failed: no value for GextNext; failed to autodetect profile: failed to fetch sysobjectid: cannot get sysobjectid: no sysobjectid; failed to fetch values: failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: error getting oids `[1.3.6.1.2.1.1.3.0]`: no value", }, } @@ -1274,7 +1275,7 @@ func TestCheck_Run_sessionCloseError(t *testing.T) { sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { return sess, nil } - sess.CloseErr = fmt.Errorf("close error") + sess.CloseErr = errors.New("close error") chk := Check{sessionFactory: sessionFactory} // language=yaml @@ -1505,7 +1506,7 @@ tags: } sess.On("GetNext", []string{"1.0"}).Return(&gosnmplib.MockValidReachableGetNextPacket, nil) var sysObjectIDPacket *gosnmp.SnmpPacket - sess.On("Get", []string{"1.3.6.1.2.1.1.2.0"}).Return(sysObjectIDPacket, fmt.Errorf("no value")) + sess.On("Get", []string{"1.3.6.1.2.1.1.2.0"}).Return(sysObjectIDPacket, errors.New("no value")) sess.On("Get", []string{ "1.3.6.1.2.1.1.1.0", @@ -1666,20 +1667,20 @@ tags: sender.On("Commit").Return() var nilPacket *gosnmp.SnmpPacket - sess.On("GetNext", []string{"1.0"}).Return(nilPacket, fmt.Errorf("no value for GetNext")) - sess.On("Get", []string{"1.3.6.1.2.1.1.2.0"}).Return(nilPacket, fmt.Errorf("no value")) + sess.On("GetNext", []string{"1.0"}).Return(nilPacket, errors.New("no value for GetNext")) + 
sess.On("Get", []string{"1.3.6.1.2.1.1.2.0"}).Return(nilPacket, errors.New("no value")) sess.On("Get", []string{ "1.3.6.1.2.1.1.1.0", "1.3.6.1.2.1.1.2.0", "1.3.6.1.2.1.1.3.0", "1.3.6.1.2.1.1.5.0", - }).Return(nilPacket, fmt.Errorf("device failure")) + }).Return(nilPacket, errors.New("device failure")) sess.On("Get", []string{ "1.3.6.1.2.1.1.1.0", "1.3.6.1.2.1.1.2.0", - }).Return(nilPacket, fmt.Errorf("device failure")) - sess.On("Get", []string{"1.3.6.1.2.1.1.1.0"}).Return(nilPacket, fmt.Errorf("device failure")) + }).Return(nilPacket, errors.New("device failure")) + sess.On("Get", []string{"1.3.6.1.2.1.1.1.0"}).Return(nilPacket, errors.New("device failure")) expectedErrMsg := "check device reachable: failed: no value for GetNext; failed to autodetect profile: failed to fetch sysobjectid: cannot get sysobjectid: no value; failed to fetch values: failed to fetch scalar oids with batching: failed to fetch scalar oids: fetch scalar: failed getting oids `[1.3.6.1.2.1.1.1.0]` using Get: device failure" @@ -2158,7 +2159,7 @@ metric_tags: sender.On("ServiceCheck", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return() sender.On("Commit").Return() - sess.On("Get", mock.Anything).Return(&gosnmp.SnmpPacket{}, fmt.Errorf("get error")) + sess.On("Get", mock.Anything).Return(&gosnmp.SnmpPacket{}, errors.New("get error")) err = chk.Run() assert.Nil(t, err) diff --git a/pkg/collector/corechecks/system/cpu/cpu/context_switches.go b/pkg/collector/corechecks/system/cpu/cpu/context_switches.go index beb800a481bcf7..5bf8cde7bdf38a 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/context_switches.go +++ b/pkg/collector/corechecks/system/cpu/cpu/context_switches.go @@ -7,10 +7,10 @@ package cpu -import "fmt" +import "errors" // GetContextSwitches retrieves the number of context switches for the current process. // It returns an integer representing the count and an error if the retrieval fails. 
func GetContextSwitches() (int64, error) { - return 0, fmt.Errorf("context switches not supported on macOS") + return 0, errors.New("context switches not supported on macOS") } diff --git a/pkg/collector/corechecks/system/cpu/cpu/context_switches_linux.go b/pkg/collector/corechecks/system/cpu/cpu/context_switches_linux.go index cf118b1dbfaa36..14a75f61a95f48 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/context_switches_linux.go +++ b/pkg/collector/corechecks/system/cpu/cpu/context_switches_linux.go @@ -9,12 +9,14 @@ package cpu import ( "bufio" + "errors" "fmt" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util/log" "os" "strconv" "strings" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/log" ) // GetContextSwitches retrieves the number of context switches for the current process. @@ -44,5 +46,5 @@ func GetContextSwitches() (ctxSwitches int64, err error) { return ctxSwitches, nil } } - return 0, fmt.Errorf("could not find the context switches in stat file") + return 0, errors.New("could not find the context switches in stat file") } diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu.go b/pkg/collector/corechecks/system/cpu/cpu/cpu.go index b3ec8ff29df684..1b1c0fadef6a9f 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu.go @@ -8,7 +8,7 @@ package cpu import ( - "fmt" + "errors" "github.com/shirou/gopsutil/v4/cpu" "gopkg.in/yaml.v2" @@ -103,7 +103,7 @@ func (c *Check) reportCPUMetricsPercent(sender sender.Sender, numCores int32) (e } log.Debugf("getCPUTimes(false): %s", cpuTimes) if len(cpuTimes) == 0 { - err = fmt.Errorf("no cpu stats retrieve (empty results)") + err = errors.New("no cpu stats retrieve (empty results)") log.Errorf("%s", err.Error()) return err } @@ -148,7 +148,7 @@ func (c *Check) reportCPUMetricsTotal(sender sender.Sender) (err error) { } 
log.Debugf("getCPUTimes(%t): %s", c.instanceConfig.ReportTotalPerCPU, cpuTimes) for _, t := range cpuTimes { - tags := []string{fmt.Sprintf("core:%s", t.CPU)} + tags := []string{"core:" + t.CPU} sender.Gauge("system.cpu.user.total", t.User, "", tags) sender.Gauge("system.cpu.nice.total", t.Nice, "", tags) sender.Gauge("system.cpu.system.total", t.System, "", tags) diff --git a/pkg/collector/corechecks/system/disk/disk/disk_nix.go b/pkg/collector/corechecks/system/disk/disk/disk_nix.go index b8a7582fa459d1..489289a68c52c5 100644 --- a/pkg/collector/corechecks/system/disk/disk/disk_nix.go +++ b/pkg/collector/corechecks/system/disk/disk/disk_nix.go @@ -82,7 +82,7 @@ func (c *Check) collectPartitionMetrics(sender sender.Sender) error { tags := make([]string, 0, 2) if c.cfg.tagByFilesystem { - tags = append(tags, partition.Fstype, fmt.Sprintf("filesystem:%s", partition.Fstype)) + tags = append(tags, partition.Fstype, "filesystem:"+partition.Fstype) } var deviceName string if c.cfg.useMount { @@ -90,8 +90,8 @@ func (c *Check) collectPartitionMetrics(sender sender.Sender) error { } else { deviceName = partition.Device } - tags = append(tags, fmt.Sprintf("device:%s", deviceName)) - tags = append(tags, fmt.Sprintf("device_name:%s", filepath.Base(partition.Device))) + tags = append(tags, "device:"+deviceName) + tags = append(tags, "device_name:"+filepath.Base(partition.Device)) tags = c.applyDeviceTags(partition.Device, partition.Mountpoint, tags) @@ -109,8 +109,8 @@ func (c *Check) collectDiskMetrics(sender sender.Sender) error { for deviceName, ioCounter := range iomap { tags := []string{} - tags = append(tags, fmt.Sprintf("device:%s", deviceName)) - tags = append(tags, fmt.Sprintf("device_name:%s", deviceName)) + tags = append(tags, "device:"+deviceName) + tags = append(tags, "device_name:"+deviceName) tags = c.applyDeviceTags(deviceName, "", tags) diff --git a/pkg/collector/corechecks/system/disk/diskv2/disk.go b/pkg/collector/corechecks/system/disk/diskv2/disk.go index 
399c314d380a2d..b9cc3b8d87f524 100644 --- a/pkg/collector/corechecks/system/disk/diskv2/disk.go +++ b/pkg/collector/corechecks/system/disk/diskv2/disk.go @@ -105,7 +105,7 @@ func sliceMatchesExpression(slice []regexp.Regexp, expression string) bool { func compileRegExp(expr string, ignoreCase bool) (*regexp.Regexp, error) { if ignoreCase { - expr = fmt.Sprintf("(?i)%s", expr) + expr = "(?i)" + expr } re, err := regexp.Compile(expr) if err != nil { @@ -565,7 +565,7 @@ func (c *Check) getPartitionUsage(partition gopsutil_disk.PartitionStat) *gopsut func (c *Check) getPartitionTags(partition gopsutil_disk.PartitionStat) []string { tags := []string{} if c.instanceConfig.TagByFilesystem { - tags = append(tags, partition.Fstype, fmt.Sprintf("filesystem:%s", partition.Fstype)) + tags = append(tags, partition.Fstype, "filesystem:"+partition.Fstype) } var deviceName string if c.instanceConfig.UseMount { @@ -574,15 +574,15 @@ func (c *Check) getPartitionTags(partition gopsutil_disk.PartitionStat) []string deviceName = partition.Device } if c.instanceConfig.LowercaseDeviceTag { - tags = append(tags, fmt.Sprintf("device:%s", strings.ToLower(deviceName))) + tags = append(tags, "device:"+strings.ToLower(deviceName)) } else { - tags = append(tags, fmt.Sprintf("device:%s", deviceName)) + tags = append(tags, "device:"+deviceName) } - tags = append(tags, fmt.Sprintf("device_name:%s", baseDeviceName(partition.Device))) + tags = append(tags, "device_name:"+baseDeviceName(partition.Device)) tags = append(tags, c.getDeviceTags(deviceName)...) 
label, ok := c.deviceLabels[partition.Device] if ok { - tags = append(tags, fmt.Sprintf("label:%s", label), fmt.Sprintf("device_label:%s", label)) + tags = append(tags, "label:"+label, "device_label:"+label) } return tags } @@ -590,15 +590,15 @@ func (c *Check) getPartitionTags(partition gopsutil_disk.PartitionStat) []string func (c *Check) getDeviceNameTags(deviceName string) []string { tags := []string{} if c.instanceConfig.LowercaseDeviceTag { - tags = append(tags, fmt.Sprintf("device:%s", strings.ToLower(deviceName))) + tags = append(tags, "device:"+strings.ToLower(deviceName)) } else { - tags = append(tags, fmt.Sprintf("device:%s", deviceName)) + tags = append(tags, "device:"+deviceName) } - tags = append(tags, fmt.Sprintf("device_name:%s", baseDeviceName(deviceName))) + tags = append(tags, "device_name:"+baseDeviceName(deviceName)) tags = append(tags, c.getDeviceTags(deviceName)...) label, ok := c.deviceLabels[deviceName] if ok { - tags = append(tags, fmt.Sprintf("label:%s", label), fmt.Sprintf("device_label:%s", label)) + tags = append(tags, "label:"+label, "device_label:"+label) } return tags } diff --git a/pkg/collector/corechecks/system/disk/diskv2/disk_nix.go b/pkg/collector/corechecks/system/disk/diskv2/disk_nix.go index 1ece25b28b25f1..27b114bf6814b9 100644 --- a/pkg/collector/corechecks/system/disk/diskv2/disk_nix.go +++ b/pkg/collector/corechecks/system/disk/diskv2/disk_nix.go @@ -10,6 +10,7 @@ package diskv2 import ( "bufio" "encoding/xml" + "errors" "fmt" "io" "os" @@ -375,7 +376,7 @@ func (r *rootFsDeviceFinder) Find() (string, error) { } } - return "", fmt.Errorf("could not determine rootfs device") + return "", errors.New("could not determine rootfs device") } // ReadlinkFs method diff --git a/pkg/collector/corechecks/system/disk/io/iostats_nix.go b/pkg/collector/corechecks/system/disk/io/iostats_nix.go index e3813f162da9e7..efc17872c55f5a 100644 --- a/pkg/collector/corechecks/system/disk/io/iostats_nix.go +++ 
b/pkg/collector/corechecks/system/disk/io/iostats_nix.go @@ -8,7 +8,6 @@ package io import ( - "fmt" "math" "regexp" "time" @@ -94,11 +93,11 @@ func (c *IOCheck) nixIO() error { } tags := []string{} - tags = append(tags, fmt.Sprintf("device:%s", device)) - tags = append(tags, fmt.Sprintf("device_name:%s", device)) + tags = append(tags, "device:"+device) + tags = append(tags, "device_name:"+device) if ioStats.Label != "" { - tags = append(tags, fmt.Sprintf("device_label:%s", ioStats.Label)) + tags = append(tags, "device_label:"+ioStats.Label) } sender.Rate("system.io.r_s", float64(ioStats.ReadCount), "", tags) diff --git a/pkg/collector/corechecks/system/memory/memory_nix.go b/pkg/collector/corechecks/system/memory/memory_nix.go index 577fb222bb97b6..3167700c1b3754 100644 --- a/pkg/collector/corechecks/system/memory/memory_nix.go +++ b/pkg/collector/corechecks/system/memory/memory_nix.go @@ -8,7 +8,7 @@ package memory import ( - "fmt" + "errors" "runtime" "github.com/shirou/gopsutil/v4/mem" @@ -74,7 +74,7 @@ func (c *Check) Run() error { } if errVirt != nil && errSwap != nil { - return fmt.Errorf("failed to gather any memory information") + return errors.New("failed to gather any memory information") } sender.Commit() diff --git a/pkg/collector/corechecks/system/memory/memory_nix_test.go b/pkg/collector/corechecks/system/memory/memory_nix_test.go index e8be54dbb93a9b..9eeae1f4a02f8f 100644 --- a/pkg/collector/corechecks/system/memory/memory_nix_test.go +++ b/pkg/collector/corechecks/system/memory/memory_nix_test.go @@ -8,7 +8,7 @@ package memory import ( - "fmt" + "errors" "testing" "github.com/shirou/gopsutil/v4/mem" @@ -164,8 +164,8 @@ func TestMemoryCheckDarwin(t *testing.T) { } func TestMemoryError(t *testing.T) { - virtualMemory = func() (*mem.VirtualMemoryStat, error) { return nil, fmt.Errorf("some error") } - swapMemory = func() (*mem.SwapMemoryStat, error) { return nil, fmt.Errorf("some error") } + virtualMemory = func() (*mem.VirtualMemoryStat, error) { 
return nil, errors.New("some error") } + swapMemory = func() (*mem.SwapMemoryStat, error) { return nil, errors.New("some error") } memCheck := new(Check) mock := mocksender.NewMockSender(memCheck.ID()) @@ -183,7 +183,7 @@ func TestMemoryError(t *testing.T) { func TestSwapMemoryError(t *testing.T) { virtualMemory = VirtualMemory - swapMemory = func() (*mem.SwapMemoryStat, error) { return nil, fmt.Errorf("some error") } + swapMemory = func() (*mem.SwapMemoryStat, error) { return nil, errors.New("some error") } memCheck := new(Check) mock := mocksender.NewMockSender(memCheck.ID()) @@ -217,7 +217,7 @@ func TestSwapMemoryError(t *testing.T) { } func TestVirtualMemoryError(t *testing.T) { - virtualMemory = func() (*mem.VirtualMemoryStat, error) { return nil, fmt.Errorf("some error") } + virtualMemory = func() (*mem.VirtualMemoryStat, error) { return nil, errors.New("some error") } swapMemory = SwapMemory memCheck := new(Check) diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go index a68f7f9e8f9b46..357f1a98d3e756 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go +++ b/pkg/collector/corechecks/system/wincrashdetect/probe/crashparse.go @@ -158,7 +158,7 @@ func parseWinCrashDump(wcs *WinCrashStatus) { } if len(ctx.loglines) < 2 { - wcs.ErrString = fmt.Sprintf("Invalid crash dump file %s", wcs.FileName) + wcs.ErrString = "Invalid crash dump file " + wcs.FileName wcs.StatusCode = WinCrashStatusCodeFailed return } diff --git a/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go b/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go index 08ff3a65c3c41e..cc59c19d28d91f 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go +++ b/pkg/collector/corechecks/system/wincrashdetect/probe/wincrashprobe.go @@ -8,6 +8,7 @@ package probe import ( + "errors" "fmt" "os" "path/filepath" @@ -197,7 +198,7 
@@ func (wcs *WinCrashStatus) getCurrentCrashSettings() error { // kernel, complete, automatic, active fn, _, err := k.GetStringValue("DumpFile") if err != nil { - return fmt.Errorf("Error reading dump file name") + return errors.New("Error reading dump file name") } fn, err = winutil.ExpandEnvironmentStrings(fn) if err != nil { diff --git a/pkg/collector/corechecks/system/windowscertificate/windows_certificate.go b/pkg/collector/corechecks/system/windowscertificate/windows_certificate.go index 8b536258c47152..97c6a07dc818d6 100644 --- a/pkg/collector/corechecks/system/windowscertificate/windows_certificate.go +++ b/pkg/collector/corechecks/system/windowscertificate/windows_certificate.go @@ -11,6 +11,7 @@ package windowscertificate import ( "crypto/x509" "encoding/json" + "errors" "fmt" "os" "strings" @@ -174,7 +175,7 @@ func (w *WinCertChk) Configure(senderManager sender.SenderManager, integrationCo log.Errorf("configuration error: %s (%v)", err, err.Value()) } } - return fmt.Errorf("configuration validation failed") + return errors.New("configuration validation failed") } config := Config{ @@ -254,7 +255,7 @@ func (w *WinCertChk) Run() error { servicecheck.ServiceCheckCritical, "", tags, - fmt.Sprintf("Certificate has expired. Certificate expiration date is %s", expirationDate)) + "Certificate has expired. Certificate expiration date is "+expirationDate) } else if daysRemaining < float64(w.config.DaysCritical) { sender.ServiceCheck("windows_certificate.cert_expiration", servicecheck.ServiceCheckCritical, @@ -282,10 +283,10 @@ func (w *WinCertChk) Run() error { if cert.TrustStatusError != 0 { log.Debugf("Certificate %s has trust status error: %d", cert.Certificate.Subject.String(), cert.TrustStatusError) trustStatusErrors := getCertChainTrustStatusErrors(cert.TrustStatusError) - message := fmt.Sprintf("Certificate Validation failed. 
The certificates in the certificate chain have the following errors: %s", strings.Join(trustStatusErrors, ", ")) + message := "Certificate Validation failed. The certificates in the certificate chain have the following errors: " + strings.Join(trustStatusErrors, ", ") if cert.ChainPolicyError != 0 { chainPolicyError := getCertChainPolicyErrors(cert.ChainPolicyError) - message = fmt.Sprintf("%s, %s", message, chainPolicyError) + message = message + ", " + chainPolicyError } sender.ServiceCheck("windows_certificate.cert_chain_validation", servicecheck.ServiceCheckCritical, @@ -334,7 +335,7 @@ func (w *WinCertChk) Run() error { servicecheck.ServiceCheckCritical, "", crlTags, - fmt.Sprintf("CRL has expired. CRL expiration date is %s", crlExpirationDate)) + "CRL has expired. CRL expiration date is "+crlExpirationDate) } else if crlDaysRemaining < float64(w.config.CrlDaysWarning) { sender.ServiceCheck("windows_certificate.crl_expiration", servicecheck.ServiceCheckWarning, diff --git a/pkg/collector/corechecks/system/windowscertificate/windows_certificate_util.go b/pkg/collector/corechecks/system/windowscertificate/windows_certificate_util.go index 91e071f906f2c3..f89cf3286de19c 100644 --- a/pkg/collector/corechecks/system/windowscertificate/windows_certificate_util.go +++ b/pkg/collector/corechecks/system/windowscertificate/windows_certificate_util.go @@ -11,6 +11,7 @@ package windowscertificate import ( "crypto/x509" "encoding/hex" + "errors" "fmt" "strings" "time" @@ -88,7 +89,7 @@ func getCertThumbprint(certContext *windows.CertContext) (string, error) { return "", err } if pcbData == 0 { - return "", fmt.Errorf("certificate has no SHA-1 Thumbprint") + return "", errors.New("certificate has no SHA-1 Thumbprint") } pvData := make([]byte, pcbData) @@ -110,7 +111,7 @@ func getCrlThumbprint(pCrlContext *winutil.CRLContext) (string, error) { return "", err } if pcbData == 0 { - return "", fmt.Errorf("CRL has no SHA-1 Thumbprint") + return "", errors.New("CRL has no SHA-1 
Thumbprint") } pvData := make([]byte, pcbData) diff --git a/pkg/collector/corechecks/systemd/dbus_conn.go b/pkg/collector/corechecks/systemd/dbus_conn.go index 4f8947325d20c7..372fe57e504247 100644 --- a/pkg/collector/corechecks/systemd/dbus_conn.go +++ b/pkg/collector/corechecks/systemd/dbus_conn.go @@ -14,7 +14,6 @@ package systemd import ( - "fmt" "os" "strconv" @@ -31,7 +30,7 @@ func NewSystemdConnection(privateSocket string) (*dbus.Conn, error) { return dbus.NewConnection(func() (*godbus.Conn, error) { // We skip Hello when talking directly to systemd. return dbusAuthConnection(func() (*godbus.Conn, error) { - return godbus.Dial(fmt.Sprintf("unix:path=%s", privateSocket)) + return godbus.Dial("unix:path=" + privateSocket) }) }) } diff --git a/pkg/collector/corechecks/systemd/systemd.go b/pkg/collector/corechecks/systemd/systemd.go index c8fadf8a9ec735..bb0b927b572c35 100644 --- a/pkg/collector/corechecks/systemd/systemd.go +++ b/pkg/collector/corechecks/systemd/systemd.go @@ -556,7 +556,7 @@ func (c *SystemdCheck) Configure(senderManager sender.SenderManager, integration } if len(c.config.instance.UnitNames) == 0 && len(c.config.instance.UnitRegexes) == 0 { - return fmt.Errorf("please set either `unit_names` or `unit_regexes` in the instance config") + return errors.New("please set either `unit_names` or `unit_regexes` in the instance config") } for _, regex := range c.config.instance.UnitRegexes { diff --git a/pkg/collector/corechecks/systemd/systemd_test.go b/pkg/collector/corechecks/systemd/systemd_test.go index c9f2404292a8e6..cb15b611cf34b2 100644 --- a/pkg/collector/corechecks/systemd/systemd_test.go +++ b/pkg/collector/corechecks/systemd/systemd_test.go @@ -16,6 +16,7 @@ import ( "github.com/coreos/go-systemd/v22/dbus" godbus "github.com/godbus/dbus/v5" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -206,7 +207,7 @@ private_socket: /tmp/foo/private_socket func 
TestPrivateSocketConnectionErrorCase(t *testing.T) { stats := &mockSystemdStats{} - stats.On("PrivateSocketConnection", mock.Anything).Return((*dbus.Conn)(nil), fmt.Errorf("some error")) + stats.On("PrivateSocketConnection", mock.Anything).Return((*dbus.Conn)(nil), errors.New("some error")) rawInstanceConfig := []byte(` unit_names: @@ -226,7 +227,7 @@ private_socket: /tmp/foo/private_socket func TestDefaultPrivateSocketConnection(t *testing.T) { stats := &mockSystemdStats{} - stats.On("SystemBusSocketConnection").Return((*dbus.Conn)(nil), fmt.Errorf("some error")) + stats.On("SystemBusSocketConnection").Return((*dbus.Conn)(nil), errors.New("some error")) stats.On("PrivateSocketConnection", mock.Anything).Return(&dbus.Conn{}, nil) rawInstanceConfig := []byte(` @@ -287,7 +288,7 @@ unit_names: func TestDefaultDockerAgentSystemBusSocketConnectionNotCalled(t *testing.T) { t.Setenv("DOCKER_DD_AGENT", "true") stats := &mockSystemdStats{} - stats.On("PrivateSocketConnection", mock.Anything).Return((*dbus.Conn)(nil), fmt.Errorf("some error")) + stats.On("PrivateSocketConnection", mock.Anything).Return((*dbus.Conn)(nil), errors.New("some error")) stats.On("SystemBusSocketConnection").Return(&dbus.Conn{}, nil) rawInstanceConfig := []byte(` @@ -307,8 +308,8 @@ unit_names: func TestDbusConnectionErr(t *testing.T) { stats := &mockSystemdStats{} - stats.On("PrivateSocketConnection", mock.Anything).Return((*dbus.Conn)(nil), fmt.Errorf("some error")) - stats.On("SystemBusSocketConnection").Return((*dbus.Conn)(nil), fmt.Errorf("some error")) + stats.On("PrivateSocketConnection", mock.Anything).Return((*dbus.Conn)(nil), errors.New("some error")) + stats.On("SystemBusSocketConnection").Return((*dbus.Conn)(nil), errors.New("some error")) check := SystemdCheck{stats: stats} senderManager := mocksender.CreateDefaultDemultiplexer() @@ -327,7 +328,7 @@ func TestDbusConnectionErr(t *testing.T) { func TestSystemStateCallFailGracefully(t *testing.T) { stats := &mockSystemdStats{} 
stats.On("SystemBusSocketConnection").Return(&dbus.Conn{}, nil) - stats.On("SystemState", mock.Anything).Return((*dbus.Property)(nil), fmt.Errorf("some error")) + stats.On("SystemState", mock.Anything).Return((*dbus.Property)(nil), errors.New("some error")) stats.On("ListUnits", mock.Anything).Return([]dbus.UnitStatus{}, nil) stats.On("GetVersion", mock.Anything).Return(systemdVersion) @@ -348,7 +349,7 @@ func TestSystemStateCallFailGracefully(t *testing.T) { func TestListUnitErr(t *testing.T) { stats := createDefaultMockSystemdStats() - stats.On("ListUnits", mock.Anything).Return(([]dbus.UnitStatus)(nil), fmt.Errorf("some error")) + stats.On("ListUnits", mock.Anything).Return(([]dbus.UnitStatus)(nil), errors.New("some error")) stats.On("GetVersion", mock.Anything).Return(systemdVersion) check := SystemdCheck{stats: stats} @@ -774,7 +775,7 @@ unit_regexes: [%s] // Then mockSender.AssertCalled(t, "ServiceCheck", canConnectServiceCheck, servicecheck.ServiceCheckOK, "", []string(nil), mock.Anything) for unitName, metrics := range unitsToMetrics { - tags := []string{fmt.Sprintf("unit:%s", unitName)} + tags := []string{"unit:" + unitName} assertSenderCall := mockSender.AssertNotCalled if slices.Contains(test.monitoredUnits, unitName) { assertSenderCall = mockSender.AssertCalled @@ -1095,20 +1096,24 @@ func TestGetPropertyUint64(t *testing.T) { data := map[string]struct { propertyName string expectedNumber uint64 - expectedError error + expectedError string }{ - "prop_uint property retrieved": {"prop_uint", 3, nil}, - "uint32 property retrieved": {"prop_uint32", 5, nil}, - "uint64 property retrieved": {"prop_uint64", 10, nil}, - "error int64 not valid": {"prop_int64", 0, fmt.Errorf("property prop_int64 (int64) cannot be converted to uint64")}, - "error string not valid": {"prop_string", 0, fmt.Errorf("property prop_string (string) cannot be converted to uint64")}, - "error prop not exist": {"prop_not_exist", 0, fmt.Errorf("property prop_not_exist not found")}, + 
"prop_uint property retrieved": {"prop_uint", 3, ""}, + "uint32 property retrieved": {"prop_uint32", 5, ""}, + "uint64 property retrieved": {"prop_uint64", 10, ""}, + "error int64 not valid": {"prop_int64", 0, "property prop_int64 (int64) cannot be converted to uint64"}, + "error string not valid": {"prop_string", 0, "property prop_string (string) cannot be converted to uint64"}, + "error prop not exist": {"prop_not_exist", 0, "property prop_not_exist not found"}, } for name, d := range data { t.Run(name, func(t *testing.T) { num, err := getPropertyUint64(properties, d.propertyName) assert.Equal(t, d.expectedNumber, num) - assert.Equal(t, d.expectedError, err) + if d.expectedError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, d.expectedError) + } }) } } @@ -1122,17 +1127,21 @@ func TestGetPropertyString(t *testing.T) { data := map[string]struct { propertyName string expectedString string - expectedError error + expectedError string }{ - "valid string": {"prop_string", "foo bar", nil}, - "prop_uint not valid": {"prop_uint", "", fmt.Errorf("property prop_uint (uint) cannot be converted to string")}, - "error prop not exist": {"prop_not_exist", "", fmt.Errorf("property prop_not_exist not found")}, + "valid string": {"prop_string", "foo bar", ""}, + "prop_uint not valid": {"prop_uint", "", "property prop_uint (uint) cannot be converted to string"}, + "error prop not exist": {"prop_not_exist", "", "property prop_not_exist not found"}, } for name, d := range data { t.Run(name, func(t *testing.T) { num, err := getPropertyString(properties, d.propertyName) assert.Equal(t, d.expectedString, num) - assert.Equal(t, d.expectedError, err) + if d.expectedError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, d.expectedError) + } }) } } @@ -1147,18 +1156,22 @@ func TestGetPropertyBool(t *testing.T) { data := map[string]struct { propertyName string expectedBoolValue bool - expectedError error + expectedError string }{ - "valid bool 
true": {"prop_bool_true", true, nil}, - "valid bool false": {"prop_bool_false", false, nil}, - "prop_uint not valid": {"prop_uint", false, fmt.Errorf("property prop_uint (uint) cannot be converted to bool")}, - "error prop not exist": {"prop_not_exist", false, fmt.Errorf("property prop_not_exist not found")}, + "valid bool true": {"prop_bool_true", true, ""}, + "valid bool false": {"prop_bool_false", false, ""}, + "prop_uint not valid": {"prop_uint", false, "property prop_uint (uint) cannot be converted to bool"}, + "error prop not exist": {"prop_not_exist", false, "property prop_not_exist not found"}, } for name, d := range data { t.Run(name, func(t *testing.T) { num, err := getPropertyBool(properties, d.propertyName) assert.Equal(t, d.expectedBoolValue, num) - assert.Equal(t, d.expectedError, err) + if d.expectedError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, d.expectedError) + } }) } } diff --git a/pkg/collector/python/datadog_agent.go b/pkg/collector/python/datadog_agent.go index 53803587f07203..69ebce2be4b88f 100644 --- a/pkg/collector/python/datadog_agent.go +++ b/pkg/collector/python/datadog_agent.go @@ -674,7 +674,7 @@ func EmitAgentTelemetry(checkName *C.char, metricName *C.char, metricValue C.dou func httpHeaders() map[string]string { av, _ := version.Agent() return map[string]string{ - "User-Agent": fmt.Sprintf("Datadog Agent/%s", av.GetNumber()), + "User-Agent": "Datadog Agent/" + av.GetNumber(), "Content-Type": "application/x-www-form-urlencoded", "Accept": "text/html, */*", } diff --git a/pkg/collector/python/helpers.go b/pkg/collector/python/helpers.go index ebc1f121436c77..2a5b1162bdacf5 100644 --- a/pkg/collector/python/helpers.go +++ b/pkg/collector/python/helpers.go @@ -8,6 +8,7 @@ package python import ( + "errors" "fmt" "runtime" "unsafe" @@ -95,7 +96,7 @@ func newStickyLock() (*stickyLock, error) { // Ensure that rtloader isn't destroyed while we are trying to acquire GIL if rtloader == nil { - return nil, 
fmt.Errorf("error acquiring the GIL: rtloader is not initialized") + return nil, errors.New("error acquiring the GIL: rtloader is not initialized") } state := C.ensure_gil(rtloader) diff --git a/pkg/collector/python/init.go b/pkg/collector/python/init.go index 379d7a8cd3a311..05b6b1f29d3159 100644 --- a/pkg/collector/python/init.go +++ b/pkg/collector/python/init.go @@ -395,7 +395,7 @@ func Initialize(paths ...string) error { if rtloader == nil { err := addExpvarPythonInitErrors( - fmt.Sprintf("could not load runtime python for version 3: %s", C.GoString(pyErr)), + "could not load runtime python for version 3: " + C.GoString(pyErr), ) if pyErr != nil { // pyErr tracked when created in rtloader @@ -439,7 +439,7 @@ func Initialize(paths ...string) error { // Init RtLoader machinery if C.init(rtloader) == 0 { - err := fmt.Sprintf("could not initialize rtloader: %s", C.GoString(C.get_error(rtloader))) + err := "could not initialize rtloader: " + C.GoString(C.get_error(rtloader)) return addExpvarPythonInitErrors(err) } diff --git a/pkg/collector/python/loader.go b/pkg/collector/python/loader.go index 1bda345b94445a..90037d985b6421 100644 --- a/pkg/collector/python/loader.go +++ b/pkg/collector/python/loader.go @@ -11,6 +11,7 @@ import ( "errors" "expvar" "fmt" + "strconv" "strings" "sync" "unsafe" @@ -127,7 +128,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int } if rtloader == nil { - return nil, fmt.Errorf("python is not initialized") + return nil, errors.New("python is not initialized") } moduleName := config.Name // FastDigest is used as check id calculation does not account for tags order @@ -165,7 +166,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int moduleName := TrackedCString(name) defer C._free(unsafe.Pointer(moduleName)) if res := C.get_class(rtloader, moduleName, &checkModule, &checkClass); res != 0 { - if strings.HasPrefix(name, fmt.Sprintf("%s.", wheelNamespace)) { + if 
strings.HasPrefix(name, wheelNamespace+".") { loadedAsWheel = true } break @@ -176,7 +177,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int loadErrors = append(loadErrors, fmt.Sprintf("unable to load python module %s: %v", name, err)) } else { log.Debugf("Unable to load python module - %s", name) - loadErrors = append(loadErrors, fmt.Sprintf("unable to load python module %s", name)) + loadErrors = append(loadErrors, "unable to load python module "+name) } } @@ -241,7 +242,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int configSource := config.Source if instanceIndex >= 0 { - configSource = fmt.Sprintf("%s[%d]", configSource, instanceIndex) + configSource = configSource + "[" + strconv.Itoa(instanceIndex) + "]" } // The GIL should be unlocked at this point, `check.Configure` uses its own stickyLock and stickyLocks must not be nested if err := c.Configure(senderManager, configDigest, instance, config.InitConfig, configSource); err != nil { @@ -322,8 +323,8 @@ func reportPy3Warnings(checkName string, checkFilePath string) { // add a serie to the aggregator to be sent on every flush tags := []string{ - fmt.Sprintf("status:%s", status), - fmt.Sprintf("check_name:%s", checkName), + "status:" + status, + "check_name:" + checkName, } tags = append(tags, agentVersionTags...) 
aggregator.AddRecurrentSeries(&metrics.Serie{ diff --git a/pkg/collector/python/test_check.go b/pkg/collector/python/test_check.go index e8e6279956cd78..0601baed2ced82 100644 --- a/pkg/collector/python/test_check.go +++ b/pkg/collector/python/test_check.go @@ -8,7 +8,7 @@ package python import ( - "fmt" + "errors" "runtime" "testing" "time" @@ -230,7 +230,7 @@ func testRunCheck(t *testing.T) { assert.Equal(t, C.int(1), C.get_checks_warnings_calls) assert.Equal(t, check.instance, C.run_check_instance) - assert.Equal(t, check.lastWarnings, []error{fmt.Errorf("warn1"), fmt.Errorf("warn2")}) + assert.Equal(t, check.lastWarnings, []error{errors.New("warn1"), errors.New("warn2")}) } func testRunCheckWithRuntimeNotInitializedError(t *testing.T) { @@ -443,7 +443,7 @@ func testRunErrorNil(t *testing.T) { errStr := check.runCheck(false) assert.NotNil(t, errStr) - assert.NotNil(t, fmt.Errorf("some error"), errStr) + assert.NotNil(t, errors.New("some error"), errStr) assert.Equal(t, C.int(1), C.gil_locked_calls) assert.Equal(t, C.int(1), C.gil_unlocked_calls) @@ -468,7 +468,7 @@ func testRunErrorReturn(t *testing.T) { errStr := check.runCheck(false) assert.NotNil(t, errStr) - assert.NotNil(t, fmt.Errorf("not OK"), errStr) + assert.NotNil(t, errors.New("not OK"), errStr) assert.Equal(t, C.int(1), C.gil_locked_calls) assert.Equal(t, C.int(1), C.gil_unlocked_calls) diff --git a/pkg/collector/runner/runner_test.go b/pkg/collector/runner/runner_test.go index b79cd6a4aaf86b..fdbddbcd69a6fd 100644 --- a/pkg/collector/runner/runner_test.go +++ b/pkg/collector/runner/runner_test.go @@ -7,6 +7,7 @@ package runner import ( "context" + "errors" "fmt" "sync" "testing" @@ -71,7 +72,7 @@ func (c *testCheck) StartedChan() chan struct{} { func (c *testCheck) GetWarnings() []error { if c.doWarn { - return []error{fmt.Errorf("Warning")} + return []error{errors.New("Warning")} } return []error{} @@ -91,7 +92,7 @@ func (c *testCheck) Run() error { c.runCount.Inc() if c.doErr { - return 
fmt.Errorf("myerror") + return errors.New("myerror") } return nil diff --git a/pkg/collector/worker/utilization.go b/pkg/collector/worker/utilization.go index b8f2e977bf91f9..1c5d1c0dc063bd 100644 --- a/pkg/collector/worker/utilization.go +++ b/pkg/collector/worker/utilization.go @@ -6,6 +6,7 @@ package worker import ( + "errors" "expvar" "fmt" @@ -40,7 +41,7 @@ func (m *UtilizationMonitor) GetWorkerUtilization(workerName string) (float64, e // Get instances map using the new getter function instancesMap := expvars.GetWorkerInstances() if instancesMap == nil { - return 0.0, fmt.Errorf("worker instances not found in expvars") + return 0.0, errors.New("worker instances not found in expvars") } // Look for the specific worker @@ -64,7 +65,7 @@ func (m *UtilizationMonitor) GetAllWorkerUtilizations() (map[string]float64, err // Get instances map using the new getter function instancesMap := expvars.GetWorkerInstances() if instancesMap == nil { - return nil, fmt.Errorf("worker instances not found in expvars") + return nil, errors.New("worker instances not found in expvars") } // Add all data to the return map diff --git a/pkg/collector/worker/worker.go b/pkg/collector/worker/worker.go index 3cd0d1e0cb0f24..3d94ae1649d713 100644 --- a/pkg/collector/worker/worker.go +++ b/pkg/collector/worker/worker.go @@ -7,6 +7,7 @@ package worker import ( "context" + "errors" "fmt" "time" @@ -69,15 +70,15 @@ func NewWorker( ) (*Worker, error) { if checksTracker == nil { - return nil, fmt.Errorf("worker cannot initialize using a nil checksTracker") + return nil, errors.New("worker cannot initialize using a nil checksTracker") } if pendingChecksChan == nil { - return nil, fmt.Errorf("worker cannot initialize using a nil pendingChecksChan") + return nil, errors.New("worker cannot initialize using a nil pendingChecksChan") } if shouldAddCheckStatsFunc == nil { - return nil, fmt.Errorf("worker cannot initialize using a nil shouldAddCheckStatsFunc") + return nil, errors.New("worker cannot 
initialize using a nil shouldAddCheckStatsFunc") } return newWorkerWithOptions( @@ -107,7 +108,7 @@ func newWorkerWithOptions( ) (*Worker, error) { if getDefaultSenderFunc == nil { - return nil, fmt.Errorf("worker cannot initialize using a nil getDefaultSenderFunc") + return nil, errors.New("worker cannot initialize using a nil getDefaultSenderFunc") } workerName := fmt.Sprintf("worker_%d", ID) @@ -175,7 +176,7 @@ func (w *Worker) Run() { if err != nil { log.Errorf("Error getting default sender: %v. Not sending status check for %s", err, check) } - serviceCheckTags := []string{fmt.Sprintf("check:%s", check.String()), "dd_enable_check_intake:true"} + serviceCheckTags := []string{"check:" + check.String(), "dd_enable_check_intake:true"} serviceCheckStatus := servicecheck.ServiceCheckOK hname, _ := hostname.Get(context.TODO()) diff --git a/pkg/collector/worker/worker_test.go b/pkg/collector/worker/worker_test.go index 005a6690bf4851..a530c8fa877c80 100644 --- a/pkg/collector/worker/worker_test.go +++ b/pkg/collector/worker/worker_test.go @@ -6,6 +6,7 @@ package worker import ( + "errors" "expvar" "fmt" "sync" @@ -59,7 +60,7 @@ func (c *testCheck) Interval() time.Duration { func (c *testCheck) GetWarnings() []error { if c.doWarn { - return []error{fmt.Errorf("Warning")} + return []error{errors.New("Warning")} } return []error{} @@ -76,7 +77,7 @@ func (c *testCheck) Run() error { defer c.Unlock() if c.doErr { - return fmt.Errorf("myerror") + return errors.New("myerror") } return nil @@ -615,7 +616,7 @@ func TestWorkerSenderNil(t *testing.T) { checksTracker, mockShouldAddStatsFunc, func() (sender.Sender, error) { - return nil, fmt.Errorf("testerr") + return nil, errors.New("testerr") }, haagentmock.NewMockHaAgent(), pollingInterval, diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index 786566cddb4a61..0e7a03d5b04a6b 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -11,6 +11,7 @@ package compliance import ( "context" "encoding/binary" + 
"errors" "expvar" "fmt" "hash/fnv" @@ -482,7 +483,7 @@ func (a *Agent) runDBConfigurationsExport(ctx context.Context) { func (a *Agent) reportDBConfigurationFromSystemProbe(ctx context.Context, containerID utils.ContainerID, pid int32) error { if a.opts.SysProbeClient == nil { - return fmt.Errorf("system-probe socket client was not created") + return errors.New("system-probe socket client was not created") } resource, err := a.opts.SysProbeClient.FetchDBConfig(ctx, pid) diff --git a/pkg/compliance/cli/check.go b/pkg/compliance/cli/check.go index 6e9660af6408bd..9b220d29438e41 100644 --- a/pkg/compliance/cli/check.go +++ b/pkg/compliance/cli/check.go @@ -107,7 +107,7 @@ func RunCheck(log log.Component, config config.Component, _ secrets.Component, s if checkArgs.File != "" { benchDir, benchGlob = filepath.Dir(checkArgs.File), filepath.Base(checkArgs.File) } else if checkArgs.Framework != "" { - benchDir, benchGlob = configDir, fmt.Sprintf("%s.yaml", checkArgs.Framework) + benchDir, benchGlob = configDir, checkArgs.Framework+".yaml" } else { ruleFilter = compliance.MakeDefaultRuleFilter(ipc) benchDir, benchGlob = configDir, "*.yaml" diff --git a/pkg/compliance/cli/load.go b/pkg/compliance/cli/load.go index 2ebd09c17de568..89af93f0a70bf0 100644 --- a/pkg/compliance/cli/load.go +++ b/pkg/compliance/cli/load.go @@ -11,6 +11,7 @@ package cli import ( "context" "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -51,7 +52,7 @@ func RunLoad(_ log.Component, _ config.Component, loadArgs *LoadParams) error { resourceType, resource = aptconfig.LoadConfiguration(ctx, hostroot) case "db", "database": if loadArgs.ProcPid == 0 { - return fmt.Errorf("missing required flag --proc-pid") + return errors.New("missing required flag --proc-pid") } proc, _, rootPath, err := getProcMeta(hostroot, int32(loadArgs.ProcPid)) if err != nil { diff --git a/pkg/compliance/compliance.go b/pkg/compliance/compliance.go index 69332b03487673..3cc15b0087fac9 100644 --- 
a/pkg/compliance/compliance.go +++ b/pkg/compliance/compliance.go @@ -102,7 +102,7 @@ func StartCompliance(log log.Component, func sendRunningMetrics(statsdClient ddgostatsd.ClientInterface, moduleName string) *time.Ticker { // Retrieve the agent version using a dedicated package tags := []string{ - fmt.Sprintf("version:%s", version.AgentVersion), + "version:" + version.AgentVersion, constants.CardinalityTagPrefix + "none", } diff --git a/pkg/compliance/data.go b/pkg/compliance/data.go index f8d4cf31238b26..e7c45b24630f26 100644 --- a/pkg/compliance/data.go +++ b/pkg/compliance/data.go @@ -351,7 +351,7 @@ func NewResolvedInputs(resolvingContext ResolvingContext, resolved map[string]in ri := make(ResolvedInputs, len(resolved)+1) for k, v := range resolved { if k == "context" { - return nil, fmt.Errorf("NewResolvedInputs: \"context\" is a reserved keyword") + return nil, errors.New("NewResolvedInputs: \"context\" is a reserved keyword") } ri[k] = v } @@ -398,14 +398,14 @@ func (i *InputSpec) Valid() error { // constrained to a specific input type. if i.KubeApiserver != nil || i.Docker != nil || i.Audit != nil { if i.Type != "array" { - return fmt.Errorf("input of types kubeApiserver docker and audit have to be arrays") + return errors.New("input of types kubeApiserver docker and audit have to be arrays") } } else if i.Type == "array" { if i.File == nil { - return fmt.Errorf("bad input results `array`") + return errors.New("bad input results `array`") } if isGlob := i.File.Glob != "" || strings.Contains(i.File.Path, "*"); !isGlob { - return fmt.Errorf("file input results defined as array has to be a glob path") + return errors.New("file input results defined as array has to be a glob path") } } return nil @@ -416,7 +416,7 @@ func (i *InputSpec) Valid() error { // valid. 
func (b *Benchmark) Valid() error { if len(b.Rules) == 0 { - return fmt.Errorf("bad benchmark: empty rule set") + return errors.New("bad benchmark: empty rule set") } for _, rule := range b.Rules { if len(rule.InputSpecs) == 0 { diff --git a/pkg/compliance/evaluator_rego.go b/pkg/compliance/evaluator_rego.go index 39e84f63767ac6..34e6ec43c5058a 100644 --- a/pkg/compliance/evaluator_rego.go +++ b/pkg/compliance/evaluator_rego.go @@ -71,12 +71,12 @@ func EvaluateRegoRule(ctx context.Context, resolvedInputs ResolvedInputs, benchm return wrapErr(err) } if len(rSet) == 0 || len(rSet[0].Expressions) == 0 { - return wrapErr(fmt.Errorf("empty results set")) + return wrapErr(errors.New("empty results set")) } results, ok := rSet[0].Expressions[0].Value.([]interface{}) if !ok { - return wrapErr(fmt.Errorf("could not cast expression value")) + return wrapErr(errors.New("could not cast expression value")) } log.TraceFunc(func() string { @@ -103,7 +103,7 @@ func EvaluateRegoRule(ctx context.Context, resolvedInputs ResolvedInputs, benchm func newCheckEventFromRegoResult(data interface{}, rule *Rule, resolvedInputs ResolvedInputs, benchmark *Benchmark) *CheckEvent { m, ok := data.(map[string]interface{}) if !ok || m == nil { - return NewCheckError(RegoEvaluator, fmt.Errorf("failed to cast event"), "", "", rule, benchmark) + return NewCheckError(RegoEvaluator, errors.New("failed to cast event"), "", "", rule, benchmark) } var result CheckResult var errReason error @@ -149,7 +149,7 @@ func buildRegoModules(rootDir string, rule *Rule) (map[string]string, error) { modules := map[string]string{ "datadog_helpers.rego": regoHelpersSource, } - ruleFilename := fmt.Sprintf("%s.rego", rule.ID) + ruleFilename := rule.ID + ".rego" ruleCode, err := loadFile(rootDir, ruleFilename) if err != nil && !os.IsNotExist(err) { return nil, err @@ -212,7 +212,7 @@ var regoBuiltins = []func(*rego.Rego){ func(_ rego.BuiltinContext, a *regoast.Term) (*regoast.Term, error) { str, ok := 
a.Value.(regoast.String) if !ok { - return nil, fmt.Errorf("rego builtin parse_octal was not given a String") + return nil, errors.New("rego builtin parse_octal was not given a String") } value, err := strconv.ParseInt(string(str), 8, 0) if err != nil { diff --git a/pkg/compliance/evaluator_xccdf.go b/pkg/compliance/evaluator_xccdf.go index 34f76bfd82a965..32644c4b4c5638 100644 --- a/pkg/compliance/evaluator_xccdf.go +++ b/pkg/compliance/evaluator_xccdf.go @@ -10,6 +10,7 @@ package compliance import ( "bufio" "context" + "errors" "fmt" "io" "os" @@ -119,7 +120,7 @@ func (p *oscapIO) Run(ctx context.Context) error { cmd.Dir = filepath.Dir(p.File) cmd.Env = os.Environ() if oscapProbeRoot != "" { - cmd.Env = append(cmd.Env, fmt.Sprintf("OSCAP_PROBE_ROOT=%s", oscapProbeRoot)) + cmd.Env = append(cmd.Env, "OSCAP_PROBE_ROOT="+oscapProbeRoot) } p.cmd = cmd @@ -353,10 +354,10 @@ func evaluateXCCDFRule(ctx context.Context, hostname string, statsdClient statsd case XCCDF_RESULT_FAIL: event = NewCheckEvent(XCCDFEvaluator, CheckFailed, ruleResult.Data, hostname, "host", rule, benchmark) case XCCDF_RESULT_ERROR, XCCDF_RESULT_UNKNOWN: - errReason := fmt.Errorf("XCCDF_RESULT_ERROR") + errReason := errors.New("XCCDF_RESULT_ERROR") event = NewCheckError(XCCDFEvaluator, errReason, hostname, "host", rule, benchmark) case XCCDF_RESULT_NOT_APPLICABLE: - skipReason := fmt.Errorf("XCCDF_RESULT_NOT_APPLICABLE") + skipReason := errors.New("XCCDF_RESULT_NOT_APPLICABLE") event = NewCheckSkipped(XCCDFEvaluator, skipReason, hostname, "host", rule, benchmark) case XCCDF_RESULT_NOT_CHECKED, XCCDF_RESULT_NOT_SELECTED: } diff --git a/pkg/compliance/k8sconfig/loader.go b/pkg/compliance/k8sconfig/loader.go index 27663a10f03275..f4840ceb4777ca 100644 --- a/pkg/compliance/k8sconfig/loader.go +++ b/pkg/compliance/k8sconfig/loader.go @@ -15,6 +15,7 @@ import ( "encoding/hex" "encoding/json" "encoding/pem" + "errors" "fmt" "io" "net/url" @@ -440,11 +441,11 @@ func (l *loader) extractCertData(certData 
[]byte) *K8sCertFileMeta { const CertificateBlockType = "CERTIFICATE" certPemBlock, _ := pem.Decode(certData) if certPemBlock == nil { - l.pushError(fmt.Errorf("could not PEM decode certificate data")) + l.pushError(errors.New("could not PEM decode certificate data")) return nil } if certPemBlock.Type != CertificateBlockType { - l.pushError(fmt.Errorf("decoded PEM does not start with correct block type")) + l.pushError(errors.New("decoded PEM does not start with correct block type")) return nil } c, err := x509.ParseCertificate(certPemBlock.Bytes) @@ -564,7 +565,7 @@ func (l *loader) loadKubeconfigMeta(name string) (*K8sKubeconfigMeta, bool) { // in OpenSSH >= 2.6, a fingerprint is now displayed as base64 SHA256. func printSHA256Fingerprint(f []byte) string { - return fmt.Sprintf("SHA256:%s", strings.TrimSuffix(base64.StdEncoding.EncodeToString(f), "=")) + return "SHA256:" + strings.TrimSuffix(base64.StdEncoding.EncodeToString(f), "=") } func printColumnSeparatedHex(d []byte) string { diff --git a/pkg/compliance/reporter.go b/pkg/compliance/reporter.go index 40d7d89ea54dca..67cb4c7a9a9f7f 100644 --- a/pkg/compliance/reporter.go +++ b/pkg/compliance/reporter.go @@ -7,7 +7,6 @@ package compliance import ( "encoding/json" - "fmt" "strings" "time" @@ -67,7 +66,7 @@ func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *c tags := []string{ common.QueryAccountIDTag(), - fmt.Sprintf("host:%s", hostname), + "host:" + hostname, } // merge tags from config diff --git a/pkg/compliance/resolver.go b/pkg/compliance/resolver.go index 9ec9fa0d3b3d90..cd5e5f09bbfe4f 100644 --- a/pkg/compliance/resolver.go +++ b/pkg/compliance/resolver.go @@ -227,7 +227,7 @@ func (r *defaultResolver) ResolveInputs(ctx context.Context, rule *Rule) (Resolv resultType = "constants" result = *spec.Constants default: - return nil, fmt.Errorf("bad input spec") + return nil, errors.New("bad input spec") } tagName := resultType @@ -502,7 +502,7 @@ func (r *defaultResolver) 
resolveGroup(_ context.Context, spec InputSpecGroup) ( } parts := strings.SplitN(string(line), ":", 4) if len(parts) != 4 { - return nil, fmt.Errorf("malformed group file format") + return nil, errors.New("malformed group file format") } gid, err := strconv.Atoi(parts[2]) if err != nil { diff --git a/pkg/compliance/resolver_k8s.go b/pkg/compliance/resolver_k8s.go index d2f1ea16f9ba5b..f60678a7079a8e 100644 --- a/pkg/compliance/resolver_k8s.go +++ b/pkg/compliance/resolver_k8s.go @@ -9,6 +9,7 @@ package compliance import ( "context" + "errors" "fmt" kubemetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -83,11 +84,11 @@ func (r *k8sapiserverResolver) resolveKubeApiserver(ctx context.Context, spec In } if len(spec.Kind) == 0 { - return nil, fmt.Errorf("cannot run Kubeapiserver check, resource kind is empty") + return nil, errors.New("cannot run Kubeapiserver check, resource kind is empty") } if len(spec.APIRequest.Verb) == 0 { - return nil, fmt.Errorf("cannot run Kubeapiserver check, action verb is empty") + return nil, errors.New("cannot run Kubeapiserver check, action verb is empty") } if len(spec.Version) == 0 { @@ -124,7 +125,7 @@ func (r *k8sapiserverResolver) resolveKubeApiserver(ctx context.Context, spec In switch api.Verb { case "get": if len(api.ResourceName) == 0 { - return nil, fmt.Errorf("unable to use 'get' apirequest without resource name") + return nil, errors.New("unable to use 'get' apirequest without resource name") } resource, err := resourceAPI.Get(ctx, spec.APIRequest.ResourceName, kubemetav1.GetOptions{}) if err != nil { diff --git a/pkg/compliance/scap/syschar.go b/pkg/compliance/scap/syschar.go index 187b9fd874ad97..cb2a90e280a512 100644 --- a/pkg/compliance/scap/syschar.go +++ b/pkg/compliance/scap/syschar.go @@ -7,7 +7,7 @@ // results. package scap -import "fmt" +import "errors" // SystemCharacteristics contains the internal charasteristics of an OVAL // evaluation. 
@@ -40,7 +40,7 @@ type Item struct { // SysChar returns the refined system characteristics from a Document. func SysChar(doc *Document) (*SystemCharacteristics, error) { if doc.OvalSystemCharacteristics == nil { - return nil, fmt.Errorf("OvalSystemCharacteristics is nil") + return nil, errors.New("OvalSystemCharacteristics is nil") } systemCharacteristics := SystemCharacteristics{} diff --git a/pkg/compliance/tests/helpers.go b/pkg/compliance/tests/helpers.go index 179455d975c656..23146d4cfef6bd 100644 --- a/pkg/compliance/tests/helpers.go +++ b/pkg/compliance/tests/helpers.go @@ -325,16 +325,18 @@ scope: input: %s` - suite := fmt.Sprintf(suiteTpl, name, "framework_"+name, "42.12") + var suiteBuilder strings.Builder + suiteBuilder.WriteString(fmt.Sprintf(suiteTpl, name, "framework_"+name, "42.12")) for _, rule := range rules { scope := rule.scope if scope == "" { scope = "none" } ruleData := fmt.Sprintf(ruleTpl, rule.name, scope, indent(1, rule.input)) - suite += "\n - " + indent(2, ruleData) + suiteBuilder.WriteString("\n - ") + suiteBuilder.WriteString(indent(2, ruleData)) } - return suite + return suiteBuilder.String() } func indent(count int, s string) string { diff --git a/pkg/compliance/tools/k8s_types_generator/main.go b/pkg/compliance/tools/k8s_types_generator/main.go index 24a746a17c7cfd..3d1efeca7999d5 100644 --- a/pkg/compliance/tools/k8s_types_generator/main.go +++ b/pkg/compliance/tools/k8s_types_generator/main.go @@ -469,8 +469,8 @@ func printKomponentCode(komp *komponent) string { titled := cases.Title(language.English, cases.NoLower).String(komp.name) goStructName := strings.ReplaceAll(titled, "-", "") - s := "" - s += fmt.Sprintf("type K8s%sConfig struct {\n", goStructName) + var sb strings.Builder + fmt.Fprintf(&sb, "type K8s%sConfig struct {\n", goStructName) for _, c := range komp.confs { if !isKnownFlag(c.flagName) { continue @@ -479,21 +479,22 @@ func printKomponentCode(komp *komponent) string { if !strings.HasPrefix(goType, "*") && 
!strings.HasPrefix(goType, "[]") { goType = "*" + goType } - s += fmt.Sprintf(" %s %s `json:\"%s,omitempty\"` // versions: %s\n", + fmt.Fprintf(&sb, " %s %s `json:\"%s,omitempty\"` // versions: %s\n", toGoField(c.flagName), goType, toGoJSONTag(c.flagName), strings.Join(c.versions, ", ")) } - s += " SkippedFlags map[string]string `json:\"skippedFlags,omitempty\"`\n" - s += "}\n" - s += fmt.Sprintf("func (l *loader) newK8s%sConfig(flags map[string]string) *K8s%sConfig {\n", goStructName, goStructName) - s += "if (flags == nil) { return nil }\n" - s += fmt.Sprintf("var res K8s%sConfig\n", goStructName) + sb.WriteString(" SkippedFlags map[string]string `json:\"skippedFlags,omitempty\"`\n") + sb.WriteString("}\n") + fmt.Fprintf(&sb, "func (l *loader) newK8s%sConfig(flags map[string]string) *K8s%sConfig {\n", goStructName, goStructName) + sb.WriteString("if (flags == nil) { return nil }\n") + fmt.Fprintf(&sb, "var res K8s%sConfig\n", goStructName) for _, c := range komp.confs { if !isKnownFlag(c.flagName) { continue } - s += fmt.Sprintf("if v, ok := flags[\"--%s\"]; ok {\n", c.flagName) - s += fmt.Sprintf("delete(flags, \"--%s\")\n", c.flagName) - s += printAssignment(c, "v") + "\n" + fmt.Fprintf(&sb, "if v, ok := flags[\"--%s\"]; ok {\n", c.flagName) + fmt.Fprintf(&sb, "delete(flags, \"--%s\")\n", c.flagName) + sb.WriteString(printAssignment(c, "v")) + sb.WriteString("\n") if c.flagDefault != "" { // kube-apiserver and etcd components do not have any configuration file. 
if komp.name != "kube-apiserver" && komp.name != "etcd" && komp.name != "kube-controller-manager" { @@ -506,21 +507,21 @@ func printKomponentCode(komp *komponent) string { panic(fmt.Errorf("missing %s configuration associated path to flag %q (default = %q)", komp.name, c.flagName, c.flagDefault)) } if configCursor != "" { - s += fmt.Sprintf("\n} else if !l.configFileMetaHasField(res.Config, %q) {\n", configCursor) + fmt.Fprintf(&sb, "\n} else if !l.configFileMetaHasField(res.Config, %q) {\n", configCursor) } else { - s += "\n} else {\n" + sb.WriteString("\n} else {\n") } } else { - s += "\n} else {\n" + sb.WriteString("\n} else {\n") } - s += printAssignment(c, fmt.Sprintf("%q", c.flagDefault)) + sb.WriteString(printAssignment(c, fmt.Sprintf("%q", c.flagDefault))) } - s += "}\n" + sb.WriteString("}\n") } - s += "if len(flags) > 0 { res.SkippedFlags = flags }\n" - s += "return &res\n" - s += "}\n" - return s + sb.WriteString("if len(flags) > 0 { res.SkippedFlags = flags }\n") + sb.WriteString("return &res\n") + sb.WriteString("}\n") + return sb.String() } func downloadEtcdAndExtractFlags(componentVersion string) *komponent { diff --git a/pkg/compliance/utils/processes_linux.go b/pkg/compliance/utils/processes_linux.go index 18f769f6096a1f..09fa3158310680 100644 --- a/pkg/compliance/utils/processes_linux.go +++ b/pkg/compliance/utils/processes_linux.go @@ -10,7 +10,8 @@ package utils import ( // We wrap pkg/security/utils here only for compat reason to be able to // still compile pkg/compliance on !linux. 
- "fmt" + + "errors" secutils "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -64,5 +65,5 @@ func GetContainerOverlayPath(pid int32) (string, error) { } } } - return "", fmt.Errorf("could not find overlay mountpoint") + return "", errors.New("could not find overlay mountpoint") } diff --git a/pkg/compliance/utils/processes_nolinux.go b/pkg/compliance/utils/processes_nolinux.go index 7bfd1fdfea1247..6c24296e7a4fd8 100644 --- a/pkg/compliance/utils/processes_nolinux.go +++ b/pkg/compliance/utils/processes_nolinux.go @@ -7,7 +7,7 @@ package utils -import "fmt" +import "errors" // ContainerID wraps a string representing a container identifier. type ContainerID string @@ -26,5 +26,5 @@ func GetProcessRootPath(_ int32) (string, bool) { // GetContainerOverlayPath tries to extract the directory mounted as root // mountpoint of the given process. func GetContainerOverlayPath(_ int32) (string, error) { - return "", fmt.Errorf("not implemented") + return "", errors.New("not implemented") } diff --git a/pkg/config/legacy/converter.go b/pkg/config/legacy/converter.go index fed465a1b56f76..73d9df689b3dd2 100644 --- a/pkg/config/legacy/converter.go +++ b/pkg/config/legacy/converter.go @@ -8,6 +8,7 @@ package legacy import ( "encoding/csv" + "errors" "fmt" "net/url" "slices" @@ -244,7 +245,7 @@ func extractTraceAgentConfig(agentConfig Config, converter *ConfigConverter) err func isAffirmative(value string) (bool, error) { if value == "" { - return false, fmt.Errorf("value is empty") + return false, errors.New("value is empty") } v := strings.ToLower(value) @@ -256,7 +257,7 @@ func extractURLAPIKeys(agentConfig Config, converter *ConfigConverter) error { keys := strings.Split(agentConfig["api_key"], ",") if len(urls) != len(keys) { - return fmt.Errorf("Invalid number of 'dd_url'/'api_key': please provide one api_key for each url") + return errors.New("Invalid number of 'dd_url'/'api_key': please provide one api_key for each url") } if urls[0] != "https://app.datadoghq.com" 
{ @@ -275,7 +276,7 @@ func extractURLAPIKeys(agentConfig Config, converter *ConfigConverter) error { additionalEndpoints := map[string][]string{} for idx, url := range urls { if url == "" || keys[idx] == "" { - return fmt.Errorf("Found empty additional 'dd_url' or 'api_key'. Please check that you don't have any misplaced commas") + return errors.New("Found empty additional 'dd_url' or 'api_key'. Please check that you don't have any misplaced commas") } keys[idx] = configUtils.SanitizeAPIKey(keys[idx]) additionalEndpoints[url] = append(additionalEndpoints[url], keys[idx]) diff --git a/pkg/config/legacy/docker.go b/pkg/config/legacy/docker.go index 69b284ca4bb50e..a99d184cfd2e44 100644 --- a/pkg/config/legacy/docker.go +++ b/pkg/config/legacy/docker.go @@ -8,6 +8,7 @@ package legacy import ( + "errors" "fmt" "os" "path/filepath" @@ -114,7 +115,7 @@ func ImportDockerConf(src, dst string, overwrite bool, converter *ConfigConverte return fmt.Errorf("unable to create a backup copy of the destination file: %v", err) } } else { - return fmt.Errorf("destination file already exists, run the command again with --force or -f to overwrite it") + return errors.New("destination file already exists, run the command again with --force or -f to overwrite it") } } // Create necessary destination dir diff --git a/pkg/config/legacy/kubernetes.go b/pkg/config/legacy/kubernetes.go index a6aa117cd28c71..11b48c870331c9 100644 --- a/pkg/config/legacy/kubernetes.go +++ b/pkg/config/legacy/kubernetes.go @@ -6,6 +6,7 @@ package legacy import ( + "errors" "fmt" "os" "path/filepath" @@ -125,7 +126,7 @@ func importKubernetesConfWithDeprec(src, dst string, overwrite bool, converter * return deprecations, fmt.Errorf("unable to create a backup copy of the destination file: %v", err) } } else { - return deprecations, fmt.Errorf("destination file already exists, run the command again with --force or -f to overwrite it") + return deprecations, errors.New("destination file already exists, run the 
command again with --force or -f to overwrite it") } } // Create necessary destination dir diff --git a/pkg/config/nodetreemodel/config.go b/pkg/config/nodetreemodel/config.go index bc6e7ec661da3c..32a11dd6da6646 100644 --- a/pkg/config/nodetreemodel/config.go +++ b/pkg/config/nodetreemodel/config.go @@ -843,7 +843,7 @@ func (c *ntmConfig) MergeConfig(in io.Reader) error { defer c.Unlock() if !c.isReady() && !c.allowDynamicSchema.Load() { - return fmt.Errorf("attempt to MergeConfig before config is constructed") + return errors.New("attempt to MergeConfig before config is constructed") } content, err := io.ReadAll(in) diff --git a/pkg/config/nodetreemodel/missing_node.go b/pkg/config/nodetreemodel/missing_node.go index 5abe91a25e7bd6..bb084f99441085 100644 --- a/pkg/config/nodetreemodel/missing_node.go +++ b/pkg/config/nodetreemodel/missing_node.go @@ -6,7 +6,7 @@ package nodetreemodel import ( - "fmt" + "errors" "github.com/DataDog/datadog-agent/pkg/config/model" ) @@ -19,7 +19,7 @@ var _ Node = (*missingLeafImpl)(nil) var missingLeaf = &missingLeafImpl{} func (m *missingLeafImpl) GetChild(string) (Node, error) { - return nil, fmt.Errorf("GetChild(): missing") + return nil, errors.New("GetChild(): missing") } func (m *missingLeafImpl) Get() interface{} { @@ -27,11 +27,11 @@ func (m *missingLeafImpl) Get() interface{} { } func (m *missingLeafImpl) ReplaceValue(interface{}) error { - return fmt.Errorf("Replacevalue(): missing") + return errors.New("Replacevalue(): missing") } func (m *missingLeafImpl) SetWithSource(interface{}, model.Source) error { - return fmt.Errorf("SetWithSource(): missing") + return errors.New("SetWithSource(): missing") } func (m *missingLeafImpl) Source() model.Source { diff --git a/pkg/config/nodetreemodel/node.go b/pkg/config/nodetreemodel/node.go index 4df61a422a876f..54fcbc409475d6 100644 --- a/pkg/config/nodetreemodel/node.go +++ b/pkg/config/nodetreemodel/node.go @@ -6,6 +6,7 @@ package nodetreemodel import ( + "errors" "fmt" "reflect" 
@@ -14,7 +15,7 @@ import ( ) // ErrNotFound is an error for when a key is not found -var ErrNotFound = fmt.Errorf("not found") +var ErrNotFound = errors.New("not found") func mapToMapString(m reflect.Value) map[string]interface{} { if v, ok := m.Interface().(map[string]interface{}); ok { diff --git a/pkg/config/nodetreemodel/read_config_file.go b/pkg/config/nodetreemodel/read_config_file.go index b5a0af3a640985..343d880df5bad8 100644 --- a/pkg/config/nodetreemodel/read_config_file.go +++ b/pkg/config/nodetreemodel/read_config_file.go @@ -6,6 +6,7 @@ package nodetreemodel import ( + "errors" "fmt" "io" "os" @@ -155,7 +156,7 @@ func loadYamlInto(dest InnerNode, source model.Source, inData map[string]interfa c, _ := dest.GetChild(key) if _, ok := c.(InnerNode); ok { // Both default and dest have a child but they conflict in type. This should never happen. - warnings = append(warnings, fmt.Errorf("invalid tree: default and dest tree don't have the same layout")) + warnings = append(warnings, errors.New("invalid tree: default and dest tree don't have the same layout")) } else { dest.InsertChildNode(key, newLeafNode(value, source)) } @@ -184,7 +185,7 @@ func loadYamlInto(dest InnerNode, source model.Source, inData map[string]interfa destChildInner, ok := destChild.(InnerNode) if !ok { // Both default and dest have a child but they conflict in type. This should never happen. - warnings = append(warnings, fmt.Errorf("invalid tree: default and dest tree don't have the same layout")) + warnings = append(warnings, errors.New("invalid tree: default and dest tree don't have the same layout")) continue } warnings = append(warnings, loadYamlInto(destChildInner, source, childValue, currPath, schemaInner, allowDynamicSchema)...) 
diff --git a/pkg/config/nodetreemodel/reflection_node.go b/pkg/config/nodetreemodel/reflection_node.go index 9c70aa47c1d311..325dc26ceea6d1 100644 --- a/pkg/config/nodetreemodel/reflection_node.go +++ b/pkg/config/nodetreemodel/reflection_node.go @@ -6,6 +6,7 @@ package nodetreemodel import ( + "errors" "fmt" "reflect" @@ -15,8 +16,8 @@ import ( var ( // error when a caller tries to construct a node from reflect.Value, this is a logic error, calling code should // not be reflection based, but should be working with "native" go types that come from parsing json, yaml, etc - errReflectValue = fmt.Errorf("refusing to construct node from reflect.Value") - errUnknownConversion = fmt.Errorf("no conversion found") + errReflectValue = errors.New("refusing to construct node from reflect.Value") + errUnknownConversion = errors.New("no conversion found") ) // asReflectionNode returns a node using reflection: should only show up in test code diff --git a/pkg/config/nodetreemodel/struct_node.go b/pkg/config/nodetreemodel/struct_node.go index aeb7bbf7c49a57..7e9857e1f08822 100644 --- a/pkg/config/nodetreemodel/struct_node.go +++ b/pkg/config/nodetreemodel/struct_node.go @@ -6,7 +6,7 @@ package nodetreemodel import ( - "fmt" + "errors" "reflect" "slices" "strings" @@ -43,7 +43,7 @@ func (n *structNodeImpl) HasChild(name string) bool { } func (n *structNodeImpl) Merge(InnerNode) (InnerNode, error) { - return nil, fmt.Errorf("not implemented") + return nil, errors.New("not implemented") } // ChildrenKeys returns the list of keys of the children of the given node, if it is a map @@ -64,7 +64,7 @@ func (n *structNodeImpl) ChildrenKeys() []string { // SetAt is not implemented for a struct node func (n *structNodeImpl) SetAt([]string, interface{}, model.Source) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } // InsertChildNode is not implemented for a struct node @@ -90,7 +90,7 @@ func (n *structNodeImpl) Get() interface{} { // SetWithSource 
assigns a value in the config, for the given source func (n *structNodeImpl) SetWithSource(interface{}, model.Source) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } // Source returns the source for this leaf diff --git a/pkg/config/remote/api/http.go b/pkg/config/remote/api/http.go index 9e71934fca2d73..eaa07de4903619 100644 --- a/pkg/config/remote/api/http.go +++ b/pkg/config/remote/api/http.go @@ -41,21 +41,16 @@ const ( var ( // ErrUnauthorized is the error that will be logged for the customer to see in case of a 401. We make it as // descriptive as possible (while not leaking data) to make RC onboarding easier - ErrUnauthorized = fmt.Errorf("unauthorized. Please make sure your API key is valid and has the Remote Config scope") + ErrUnauthorized = errors.New("unauthorized. Please make sure your API key is valid and has the Remote Config scope") // ErrProxy is the error that will be logged if we suspect that there is a wrong proxy setup for remote-config. // It is displayed for any 4XX status code except 401 - ErrProxy = fmt.Errorf( - "4XX status code. This might be related to the proxy settings. " + - "Please make sure the agent can reach Remote Configuration with the proxy setup", + ErrProxy = errors.New("4XX status code. This might be related to the proxy settings. 
" + + "Please make sure the agent can reach Remote Configuration with the proxy setup", ) // ErrGatewayTimeout is the error that will be logged if there is a gateway timeout - ErrGatewayTimeout = fmt.Errorf( - "non-200 response code: 504", - ) + ErrGatewayTimeout = errors.New("non-200 response code: 504") // ErrServiceUnavailable is the error that will be logged if there is the service is unavailable - ErrServiceUnavailable = fmt.Errorf( - "non-200 response code: 503", - ) + ErrServiceUnavailable = errors.New("non-200 response code: 503") ) // API is the interface to implement for a configuration fetcher @@ -119,7 +114,7 @@ func NewHTTPClient(auth Auth, cfg model.Reader, baseURL *url.URL) (*HTTPClient, return nil, fmt.Errorf("remote Configuration URL %s is invalid as TLS is required by default. While it is not advised, the `remote_configuration.no_tls` config option can be set to `true` to disable this protection", baseURL) } if transport.TLSClientConfig.InsecureSkipVerify && !cfg.GetBool("remote_configuration.no_tls_validation") { - return nil, fmt.Errorf("remote Configuration does not allow skipping TLS validation by default (currently skipped because `skip_ssl_validation` is set to true). While it is not advised, the `remote_configuration.no_tls_validation` config option can be set to `true` to disable this protection") + return nil, errors.New("remote Configuration does not allow skipping TLS validation by default (currently skipped because `skip_ssl_validation` is set to true). 
While it is not advised, the `remote_configuration.no_tls_validation` config option can be set to `true` to disable this protection") } return &HTTPClient{ client: httpClient, diff --git a/pkg/config/remote/client/client.go b/pkg/config/remote/client/client.go index 812daef78cd37d..37f675ae9dc22e 100644 --- a/pkg/config/remote/client/client.go +++ b/pkg/config/remote/client/client.go @@ -12,7 +12,6 @@ import ( "crypto/rand" "crypto/tls" "encoding/hex" - "fmt" "slices" "sync" "time" @@ -198,7 +197,7 @@ func newAgentGRPCClient(ipcAddress string, cmdPort string, tlsConfig *tls.Config // ClientGetConfigs implements the ConfigFetcher interface for agentGRPCConfigFetcher func (g *agentGRPCConfigFetcher) ClientGetConfigs(ctx context.Context, request *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) { md := metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", g.authToken)}, + "authorization": []string{"Bearer " + g.authToken}, } ctx = metadata.NewOutgoingContext(ctx, md) diff --git a/pkg/config/remote/data/file.go b/pkg/config/remote/data/file.go index 53299135a834e0..48bdbf4c8037ec 100644 --- a/pkg/config/remote/data/file.go +++ b/pkg/config/remote/data/file.go @@ -7,6 +7,7 @@ package data import ( + "errors" "fmt" "regexp" "strconv" @@ -67,7 +68,7 @@ func parseDatadogConfigPath(path string) (ConfigPath, error) { } rawProduct := matchedGroups[2] if len(rawProduct) == 0 { - return ConfigPath{}, fmt.Errorf("product is empty") + return ConfigPath{}, errors.New("product is empty") } return ConfigPath{ Source: SourceDatadog, @@ -85,7 +86,7 @@ func parseEmployeeConfigPath(path string) (ConfigPath, error) { } rawProduct := matchedGroups[1] if len(rawProduct) == 0 { - return ConfigPath{}, fmt.Errorf("product is empty") + return ConfigPath{}, errors.New("product is empty") } return ConfigPath{ Source: SourceEmployee, diff --git a/pkg/config/remote/service/service_test.go b/pkg/config/remote/service/service_test.go index 
719d3ca5cb6071..7d6ba0ece01c79 100644 --- a/pkg/config/remote/service/service_test.go +++ b/pkg/config/remote/service/service_test.go @@ -1234,7 +1234,7 @@ func TestOrgStatus(t *testing.T) { assert.True(t, prev.Enabled) assert.True(t, prev.Authorized) - api.On("FetchOrgStatus", mock.Anything).Return(nil, fmt.Errorf("Error")) + api.On("FetchOrgStatus", mock.Anything).Return(nil, errors.New("Error")) service.orgStatusPoller.poll(service.api, service.rcType) prev = service.orgStatusPoller.getPreviousStatus() assert.True(t, prev.Enabled) diff --git a/pkg/config/remote/service/util.go b/pkg/config/remote/service/util.go index 30355d8222bb07..3b099fae11011d 100644 --- a/pkg/config/remote/service/util.go +++ b/pkg/config/remote/service/util.go @@ -8,7 +8,7 @@ package service import ( "encoding/base32" "encoding/json" - "fmt" + "errors" "strings" "github.com/DataDog/datadog-agent/pkg/config/remote/api" @@ -61,7 +61,7 @@ func getRemoteConfigAuthKeys(apiKey string, rcKey string, parJWT string) (remote return remoteConfigAuthKeys{}, err } if key.AppKey == "" || key.Datacenter == "" || key.OrgID == 0 { - return remoteConfigAuthKeys{}, fmt.Errorf("invalid remote config key") + return remoteConfigAuthKeys{}, errors.New("invalid remote config key") } return remoteConfigAuthKeys{ apiKey: apiKey, diff --git a/pkg/config/remote/uptane/client.go b/pkg/config/remote/uptane/client.go index aadf1a64a0281e..f38393ec6c676d 100644 --- a/pkg/config/remote/uptane/client.go +++ b/pkg/config/remote/uptane/client.go @@ -304,7 +304,7 @@ func (c *Client) unsafeTargetsMeta() ([]byte, error) { } targets, found := metas[metaTargets] if !found { - return nil, fmt.Errorf("empty targets meta in director local store") + return nil, errors.New("empty targets meta in director local store") } return targets, nil } diff --git a/pkg/config/remote/uptane/local_store.go b/pkg/config/remote/uptane/local_store.go index 240712bfbc719f..0cb3d0235a8e81 100644 --- a/pkg/config/remote/uptane/local_store.go +++ 
b/pkg/config/remote/uptane/local_store.go @@ -38,8 +38,8 @@ type localStore struct { func newLocalStore(ts *transactionalStore, repository string, initialRoots meta.EmbeddedRoot) (*localStore, error) { s := &localStore{ store: ts, - metasBucket: fmt.Sprintf("%s_metas", repository), - rootsBucket: fmt.Sprintf("%s_roots", repository), + metasBucket: repository + "_metas", + rootsBucket: repository + "_roots", } err := s.init(initialRoots) return s, err diff --git a/pkg/config/remote/uptane/util.go b/pkg/config/remote/uptane/util.go index 651e87db7c59b3..bd64e972d58272 100644 --- a/pkg/config/remote/uptane/util.go +++ b/pkg/config/remote/uptane/util.go @@ -70,7 +70,7 @@ func unsafeMetaVersion(rawMeta json.RawMessage) (uint64, error) { return 0, err } if metaVersion.Signed == nil || metaVersion.Signed.Version == nil { - return 0, fmt.Errorf("invalid meta: version field is missing") + return 0, errors.New("invalid meta: version field is missing") } return *metaVersion.Signed.Version, nil } @@ -86,7 +86,7 @@ func unsafeMetaCustom(rawMeta json.RawMessage) ([]byte, error) { return nil, err } if metaVersion.Signed == nil { - return nil, fmt.Errorf("invalid meta: signed is missing") + return nil, errors.New("invalid meta: signed is missing") } return []byte(metaVersion.Signed.Custom), nil } @@ -102,7 +102,7 @@ func unsafeMetaExpires(rawMeta json.RawMessage) (time.Time, error) { return time.Time{}, err } if metaExpires.Signed == nil { - return time.Time{}, fmt.Errorf("invalid meta: signed is missing") + return time.Time{}, errors.New("invalid meta: signed is missing") } return metaExpires.Signed.Expires, nil } @@ -184,7 +184,7 @@ func recreate(path string, agentVersion string, apiKeyHash string, url string) ( }) if err != nil { if errors.Is(err, bbolterr.ErrTimeout) { - return nil, fmt.Errorf("rc db is locked. Please check if another instance of the agent is running and using the same `run_path` parameter") + return nil, errors.New("rc db is locked. 
Please check if another instance of the agent is running and using the same `run_path` parameter") } return nil, err } @@ -217,12 +217,12 @@ func getMetadata(db *bbolt.DB) (AgentMetadata, error) { bucket := tx.Bucket([]byte(metaBucket)) if bucket == nil { log.Infof("Missing meta bucket") - return fmt.Errorf("could not get RC metadata: missing bucket") + return errors.New("could not get RC metadata: missing bucket") } metadataBytes := bucket.Get([]byte(metaFile)) if metadataBytes == nil { log.Infof("Missing meta file in meta bucket") - return fmt.Errorf("could not get RC metadata: missing meta file") + return errors.New("could not get RC metadata: missing meta file") } err = json.Unmarshal(metadataBytes, &metadata) if err != nil { @@ -242,7 +242,7 @@ func openCacheDB(path string, agentVersion string, apiKey string, url string) (* }) if err != nil { if errors.Is(err, bbolterr.ErrTimeout) { - return nil, fmt.Errorf("rc db is locked. Please check if another instance of the agent is running and using the same `run_path` parameter") + return nil, errors.New("rc db is locked. 
Please check if another instance of the agent is running and using the same `run_path` parameter") } log.Infof("Failed to open remote configuration database %s", err) return recreate(path, agentVersion, apiKeyHash, url) diff --git a/pkg/config/remote/uptane/util_test.go b/pkg/config/remote/uptane/util_test.go index 648b7988defd52..9cde7435611963 100644 --- a/pkg/config/remote/uptane/util_test.go +++ b/pkg/config/remote/uptane/util_test.go @@ -8,13 +8,13 @@ package uptane import ( "bytes" "encoding/json" - "fmt" "os" "path/filepath" "testing" "time" "github.com/DataDog/go-tuf/data" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" @@ -166,11 +166,11 @@ func getBucketMetadata(db *bbolt.DB) (*AgentMetadata, error) { } bucket := tx.Bucket([]byte(metaBucket)) if bucket == nil { - return nil, fmt.Errorf("No bucket") + return nil, errors.New("No bucket") } metaBytes := bucket.Get([]byte(metaFile)) if metaBytes == nil { - return nil, fmt.Errorf("No meta file") + return nil, errors.New("No meta file") } metadata := new(AgentMetadata) err = json.Unmarshal(metaBytes, metadata) @@ -192,12 +192,12 @@ func checkData(db *bbolt.DB) error { return db.View(func(tx *bbolt.Tx) error { bucket := tx.Bucket([]byte("test")) if bucket == nil { - return fmt.Errorf("Bucket not present") + return errors.New("Bucket not present") } data := bucket.Get([]byte("test")) if !bytes.Equal(data, []byte("test")) { - return fmt.Errorf("Invalid test data") + return errors.New("Invalid test data") } return nil }) diff --git a/pkg/config/settings/http/client.go b/pkg/config/settings/http/client.go index 9030431dae5f0d..5a556344268dce 100644 --- a/pkg/config/settings/http/client.go +++ b/pkg/config/settings/http/client.go @@ -148,7 +148,7 @@ func (rc *runtimeSettingsClient) FullConfig() (string, error) { } func (rc *runtimeSettingsClient) FullConfigWithoutDefaults() (string, error) { - r, err := rc.doGet(fmt.Sprintf("%s/without-defaults", 
rc.baseURL), true) + r, err := rc.doGet(rc.baseURL+"/without-defaults", true) if err != nil { return "", err } @@ -156,7 +156,7 @@ func (rc *runtimeSettingsClient) FullConfigWithoutDefaults() (string, error) { } func (rc *runtimeSettingsClient) FullConfigBySource() (string, error) { - r, err := rc.doGet(fmt.Sprintf("%s/by-source", rc.baseURL), true) + r, err := rc.doGet(rc.baseURL+"/by-source", true) if err != nil { return "", err } @@ -164,7 +164,7 @@ func (rc *runtimeSettingsClient) FullConfigBySource() (string, error) { } func (rc *runtimeSettingsClient) List() (map[string]settingsComponent.RuntimeSettingResponse, error) { - r, err := rc.doGet(fmt.Sprintf("%s/list-runtime", rc.baseURL), false) + r, err := rc.doGet(rc.baseURL+"/list-runtime", false) if err != nil { return nil, err } @@ -223,7 +223,7 @@ func (rc *runtimeSettingsClient) Set(key string, value string) (bool, error) { return false, err } - body := fmt.Sprintf("value=%s", html.EscapeString(value)) + body := "value=" + html.EscapeString(value) r, err := rc.c.DoPost(fmt.Sprintf("%s/%s", rc.baseURL, key), "application/x-www-form-urlencoded", bytes.NewBuffer([]byte(body))) if err != nil { errMap := make(map[string]string) diff --git a/pkg/config/settings/runtime_profiling.go b/pkg/config/settings/runtime_profiling.go index a3d41abab34941..78da5474019ab7 100644 --- a/pkg/config/settings/runtime_profiling.go +++ b/pkg/config/settings/runtime_profiling.go @@ -7,6 +7,7 @@ package settings import ( "fmt" + "strconv" "github.com/fatih/color" ) @@ -52,7 +53,7 @@ func setRuntimeSetting(c Client, name string, value int) (interface{}, error) { return nil, fmt.Errorf("failed to get current value of %s: %v", name, err) } - if _, err := c.Set(name, fmt.Sprint(value)); err != nil { + if _, err := c.Set(name, strconv.Itoa(value)); err != nil { return nil, fmt.Errorf("failed to set %s to %v: %v", name, value, err) } diff --git a/pkg/config/settings/runtime_setting.go b/pkg/config/settings/runtime_setting.go index 
9c257a3944c246..89b4f708bade04 100644 --- a/pkg/config/settings/runtime_setting.go +++ b/pkg/config/settings/runtime_setting.go @@ -7,6 +7,7 @@ package settings import ( + "errors" "fmt" "strconv" ) @@ -32,7 +33,7 @@ func GetBool(v interface{}) (bool, error) { } b, ok := v.(bool) if !ok { - return false, fmt.Errorf("GetBool: bad parameter value provided") + return false, errors.New("GetBool: bad parameter value provided") } return b, nil } diff --git a/pkg/config/setup/apm.go b/pkg/config/setup/apm.go index 954a4dd2708ef7..d5ecdc83489936 100644 --- a/pkg/config/setup/apm.go +++ b/pkg/config/setup/apm.go @@ -8,7 +8,7 @@ package setup import ( "encoding/csv" "encoding/json" - "fmt" + "errors" "runtime" "strconv" "strings" @@ -277,11 +277,11 @@ func splitCSVString(s string, sep rune) ([]string, error) { func parseNameAndRate(token string) (string, float64, error) { parts := strings.Split(token, "=") if len(parts) != 2 { - return "", 0, fmt.Errorf("Bad format") + return "", 0, errors.New("Bad format") } rate, err := strconv.ParseFloat(parts[1], 64) if err != nil { - return "", 0, fmt.Errorf("Unabled to parse rate") + return "", 0, errors.New("Unabled to parse rate") } return parts[0], rate, nil } diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go index bf52549ff53b97..4026492109dfd2 100644 --- a/pkg/config/setup/config_test.go +++ b/pkg/config/setup/config_test.go @@ -153,7 +153,7 @@ b: func TestUnknownVarsWarning(t *testing.T) { test := func(v string, unknown bool, additional []string) func(*testing.T) { return func(t *testing.T) { - env := []string{fmt.Sprintf("%s=foo", v)} + env := []string{v + "=foo"} var exp []string if unknown { exp = append(exp, v) diff --git a/pkg/config/utils/clusteragent.go b/pkg/config/utils/clusteragent.go index 4cfed0632dc773..b05c466c3da864 100644 --- a/pkg/config/utils/clusteragent.go +++ b/pkg/config/utils/clusteragent.go @@ -34,7 +34,7 @@ func GetClusterAgentEndpoint() (string, error) { } if 
!strings.Contains(dcaURL, "://") { log.Tracef("Adding https scheme to %s: https://%s", dcaURL, dcaURL) - dcaURL = fmt.Sprintf("https://%s", dcaURL) + dcaURL = "https://" + dcaURL } u, err := url.Parse(dcaURL) if err != nil { @@ -59,14 +59,14 @@ func GetClusterAgentEndpoint() (string, error) { dcaSvc = strings.ReplaceAll(dcaSvc, "-", "_") // Kubernetes replaces "-" with "_" in the service names injected in the env var. // host - dcaSvcHostEnv := fmt.Sprintf("%s_SERVICE_HOST", dcaSvc) + dcaSvcHostEnv := dcaSvc + "_SERVICE_HOST" dcaSvcHost := os.Getenv(dcaSvcHostEnv) if dcaSvcHost == "" { return "", fmt.Errorf("cannot get a cluster agent endpoint for kubernetes service %s, env %s is empty", dcaSvc, dcaSvcHostEnv) } // port - dcaSvcPort := os.Getenv(fmt.Sprintf("%s_SERVICE_PORT", dcaSvc)) + dcaSvcPort := os.Getenv(dcaSvc + "_SERVICE_PORT") if dcaSvcPort == "" { return "", fmt.Errorf("cannot get a cluster agent endpoint for kubernetes service %s, env %s is empty", dcaSvc, dcaSvcPort) } diff --git a/pkg/databasemonitoring/aws/aurora.go b/pkg/databasemonitoring/aws/aurora.go index 9449a7686e8853..f614b3d0452255 100644 --- a/pkg/databasemonitoring/aws/aurora.go +++ b/pkg/databasemonitoring/aws/aurora.go @@ -9,6 +9,7 @@ package aws import ( "context" + "errors" "fmt" "hash/fnv" "strconv" @@ -35,7 +36,7 @@ const ( // requires the dbClusterIdentifier for the cluster func (c *Client) GetAuroraClusterEndpoints(ctx context.Context, dbClusterIdentifiers []string, dbmTag string) (map[string]*AuroraCluster, error) { if len(dbClusterIdentifiers) == 0 { - return nil, fmt.Errorf("at least one database cluster identifier is required") + return nil, errors.New("at least one database cluster identifier is required") } clusters := make(map[string]*AuroraCluster, 0) for _, clusterID := range dbClusterIdentifiers { @@ -141,12 +142,12 @@ func containsTags(clusterTags []types.Tag, providedTags []string) bool { func (c *Instance) Digest(checkType, clusterID string) string { h := fnv.New64() // 
Hash write never returns an error - h.Write([]byte(checkType)) //nolint:errcheck - h.Write([]byte(clusterID)) //nolint:errcheck - h.Write([]byte(c.Endpoint)) //nolint:errcheck - h.Write([]byte(fmt.Sprintf("%d", c.Port))) //nolint:errcheck - h.Write([]byte(c.Engine)) //nolint:errcheck - h.Write([]byte(fmt.Sprintf("%t", c.IamEnabled))) //nolint:errcheck + h.Write([]byte(checkType)) //nolint:errcheck + h.Write([]byte(clusterID)) //nolint:errcheck + h.Write([]byte(c.Endpoint)) //nolint:errcheck + h.Write([]byte(strconv.Itoa(int(c.Port)))) //nolint:errcheck + h.Write([]byte(c.Engine)) //nolint:errcheck + h.Write([]byte(strconv.FormatBool(c.IamEnabled))) //nolint:errcheck return strconv.FormatUint(h.Sum64(), 16) } diff --git a/pkg/diagnose/connectivity/core_endpoint.go b/pkg/diagnose/connectivity/core_endpoint.go index 6291fa541041b6..9cc25b12bb934f 100644 --- a/pkg/diagnose/connectivity/core_endpoint.go +++ b/pkg/diagnose/connectivity/core_endpoint.go @@ -10,6 +10,7 @@ package connectivity import ( "context" + "errors" "fmt" "net/http" "net/http/httptrace" @@ -210,7 +211,7 @@ func sendHTTPRequestToEndpoint(ctx context.Context, client *http.Client, domain "Content-Type": endpointInfo.ContentType, "DD-API-KEY": apiKey, "DD-Agent-Version": version.AgentVersion, - "User-Agent": fmt.Sprintf("datadog-agent/%s", version.AgentVersion), + "User-Agent": "datadog-agent/" + version.AgentVersion, "X-Requested-With": requestWithHeader, } @@ -243,7 +244,7 @@ func verifyEndpointResponse(diagCfg diagnose.Config, statusCode int, responseBod } if statusCode >= 400 { - newErr = fmt.Errorf("bad request") + newErr = errors.New("bad request") verifyReport = fmt.Sprintf("Received response : '%v'\n", scrubbedResponseBody) } diff --git a/pkg/diagnose/connectivity/core_endpoint_test.go b/pkg/diagnose/connectivity/core_endpoint_test.go index d08b862e7d124a..73651ad889c230 100644 --- a/pkg/diagnose/connectivity/core_endpoint_test.go +++ b/pkg/diagnose/connectivity/core_endpoint_test.go @@ -7,7 +7,6 
@@ package connectivity import ( "context" - "fmt" "io" "net/http" "net/http/httptest" @@ -162,7 +161,7 @@ func TestSendHTTPRequestHeaders(t *testing.T) { assert.Equal(t, "api_key1", r.Header.Get("DD-API-KEY")) assert.Equal(t, "application/x-protobuf", r.Header.Get("Content-Type")) assert.Equal(t, version.AgentVersion, r.Header.Get("DD-Agent-Version")) - assert.Equal(t, fmt.Sprintf("datadog-agent/%s", version.AgentVersion), r.Header.Get("User-Agent")) + assert.Equal(t, "datadog-agent/"+version.AgentVersion, r.Header.Get("User-Agent")) assert.Equal(t, requestWithHeader, r.Header.Get("X-Requested-With")) w.Write([]byte("Received Protobuf")) })) diff --git a/pkg/diagnose/connectivity/inventoryendpoint.go b/pkg/diagnose/connectivity/inventoryendpoint.go index e65cecaa8a839f..09494e946c786a 100644 --- a/pkg/diagnose/connectivity/inventoryendpoint.go +++ b/pkg/diagnose/connectivity/inventoryendpoint.go @@ -165,7 +165,7 @@ func (e *endpointDescription) buildRoute(cfg model.Reader, domain domain) string baseURL = utils.GetMainEndpoint(cfg, joinSuffix(e.prefix, "."), urlOverrideKey) } if !strings.HasPrefix(baseURL, "https://") && !strings.HasPrefix(baseURL, "http://") { - baseURL = fmt.Sprintf("https://%s", baseURL) + baseURL = "https://" + baseURL } path := e.routePath diff --git a/pkg/diagnose/connectivity/metadata.go b/pkg/diagnose/connectivity/metadata.go index d9b359de9aed5b..1b83a785f2de73 100644 --- a/pkg/diagnose/connectivity/metadata.go +++ b/pkg/diagnose/connectivity/metadata.go @@ -38,7 +38,7 @@ func DiagnoseMetadataAutodiscoveryConnectivity() []diagnose.Diagnosis { if err == nil { diagnosisString = fmt.Sprintf("Successfully connected to %s environment", name) } else { - diagnosisString = fmt.Sprintf("[Ignore if not applied] %s", err.Error()) + diagnosisString = "[Ignore if not applied] " + err.Error() } diagnoses = append(diagnoses, diagnose.Diagnosis{ diff --git a/pkg/diagnose/firewallscanner/firewallscanner.go b/pkg/diagnose/firewallscanner/firewallscanner.go 
index d3bfc4f135df5e..d0ca0795407904 100644 --- a/pkg/diagnose/firewallscanner/firewallscanner.go +++ b/pkg/diagnose/firewallscanner/firewallscanner.go @@ -10,6 +10,7 @@ package firewallscanner import ( "fmt" "runtime" + "strconv" "strings" "github.com/DataDog/datadog-agent/comp/core/config" @@ -115,7 +116,7 @@ func getNetflowRulesToCheck(config config.Component) []ruleToCheck { rulesToCheck = append(rulesToCheck, ruleToCheck{ firewallRule: firewallRule{ protocol: "UDP", - destPort: fmt.Sprintf("%d", destPort), + destPort: strconv.FormatUint(uint64(destPort), 10), }, source: fmt.Sprintf("netflow (%s)", flowTypeDetail.Name()), }) diff --git a/pkg/dyninst/actuator/state.go b/pkg/dyninst/actuator/state.go index a30dabf295774d..2afc1f5c6800bd 100644 --- a/pkg/dyninst/actuator/state.go +++ b/pkg/dyninst/actuator/state.go @@ -9,6 +9,7 @@ package actuator import ( "cmp" + "errors" "fmt" "slices" "time" @@ -343,7 +344,7 @@ func handleProcessesUpdated( ev eventProcessesUpdated, ) error { if sm.shuttingDown { - return fmt.Errorf("processes should not be updated during shutdown") + return errors.New("processes should not be updated during shutdown") } var before, after []probeKey @@ -841,7 +842,7 @@ func maybeDequeueProgram(sm *state, effects effectHandler) error { func handleShutdown(sm *state, effects effectHandler) error { if sm.shuttingDown { - return fmt.Errorf("state machine is already shutting down") + return errors.New("state machine is already shutting down") } sm.shuttingDown = true diff --git a/pkg/dyninst/actuator/state_snapshot_test.go b/pkg/dyninst/actuator/state_snapshot_test.go index 734a3d1a2e57a9..aa99de0f72b4f1 100644 --- a/pkg/dyninst/actuator/state_snapshot_test.go +++ b/pkg/dyninst/actuator/state_snapshot_test.go @@ -10,6 +10,7 @@ package actuator import ( "bufio" "bytes" + "errors" "fmt" "maps" "os" @@ -367,14 +368,14 @@ func parseEventsFromNode( eventsNode *yaml.Node, ) ([]yamlEvent, []*yaml.Node, error) { if eventsNode.Kind != yaml.DocumentNode || 
len(eventsNode.Content) != 1 { - return nil, nil, fmt.Errorf( + return nil, nil, errors.New( "expected document with single content node", ) } listNode := eventsNode.Content[0] if listNode.Kind != yaml.SequenceNode { - return nil, nil, fmt.Errorf("expected sequence node for events list") + return nil, nil, errors.New("expected sequence node for events list") } events := make([]yamlEvent, len(listNode.Content)) diff --git a/pkg/dyninst/compiler/generate.go b/pkg/dyninst/compiler/generate.go index e2c3f32a99367b..f52189d0219661 100644 --- a/pkg/dyninst/compiler/generate.go +++ b/pkg/dyninst/compiler/generate.go @@ -9,6 +9,7 @@ package compiler import ( "cmp" + stderrors "errors" "fmt" "math" "slices" @@ -688,7 +689,7 @@ func (g *generator) EncodeLocationOp(pc uint64, op *ir.LocationOp, ops []Op) ([] OutputOffset: paddedOffset - op.Offset, }) case ir.Addr: - return nil, fmt.Errorf("unsupported addr location op") + return nil, stderrors.New("unsupported addr location op") } } } diff --git a/pkg/dyninst/decode/decoder.go b/pkg/dyninst/decode/decoder.go index 3c4724baec5cec..57bdab5895f1da 100644 --- a/pkg/dyninst/decode/decoder.go +++ b/pkg/dyninst/decode/decoder.go @@ -278,7 +278,7 @@ func (s *message) init( EvaluationErrors: []evaluationError{}, } if event.EntryOrLine == nil { - return nil, fmt.Errorf("entry event is nil") + return nil, errors.New("entry event is nil") } if err := decoder.entryOrLine.init( event.EntryOrLine, decoder.program.Types, &s.Debugger.EvaluationErrors, @@ -308,7 +308,7 @@ func (s *message) init( } returnProbeEvent := decoder.probeEvents[decoder._return.rootType.ID] if returnProbeEvent.probe != probe { - return nil, fmt.Errorf("return probe event has different probe than entry probe") + return nil, errors.New("return probe event has different probe than entry probe") } returnHeader, err = event.Return.Header() if err != nil { @@ -342,7 +342,7 @@ func (s *message) init( s.Debugger.EvaluationErrors, evaluationError{ Expression: 
missingReturnReasonExpression, - Message: fmt.Sprintf("no return value available: %s", reason), + Message: "no return value available: " + reason, }, ) } diff --git a/pkg/dyninst/decode/marshal.go b/pkg/dyninst/decode/marshal.go index b492aac845853e..c1825a8d24dbfd 100644 --- a/pkg/dyninst/decode/marshal.go +++ b/pkg/dyninst/decode/marshal.go @@ -141,11 +141,11 @@ func (m *messageData) processJSONSegment( // Check presence bit using same logic as processExpression. presenceBitsetSize := ev.rootType.PresenceBitsetSize if int(presenceBitsetSize) > len(ev.rootData) { - return fmt.Errorf("presence bitset is out of bounds") + return errors.New("presence bitset is out of bounds") } presenceBitSet := bitset(ev.rootData[:presenceBitsetSize]) if exprIdx >= int(presenceBitsetSize)*8 { - return fmt.Errorf("expression index out of bounds") + return errors.New("expression index out of bounds") } if !presenceBitSet.get(exprIdx) { // Expression evaluation failed. @@ -161,7 +161,7 @@ func (m *messageData) processJSONSegment( exprDataStart := expr.Offset exprDataEnd := exprDataStart + expr.Expression.Type.GetByteSize() if exprDataEnd > uint32(len(ev.rootData)) { - return fmt.Errorf("expression data out of bounds") + return errors.New("expression data out of bounds") } exprData := ev.rootData[exprDataStart:exprDataEnd] diff --git a/pkg/dyninst/decode/types.go b/pkg/dyninst/decode/types.go index 5266fdf93b6f42..6080469fb23d53 100644 --- a/pkg/dyninst/decode/types.go +++ b/pkg/dyninst/decode/types.go @@ -629,7 +629,7 @@ func (b *baseType) formatValueFields( kind, ok := b.GetGoKind() if !ok { if !writeBoundedFallback( - buf, limits, fmt.Sprintf("unknown kind for type %s", b.GetName()), + buf, limits, "unknown kind for type "+b.GetName(), ) { return nil } @@ -712,7 +712,7 @@ func (h *goHMapHeaderType) encodeValueFields( ) error { maxOffset := max(h.countOffset+8, h.bucketsOffset+8, h.oldBucketsOffset+8) if maxOffset > uint32(len(data)) { - return fmt.Errorf("data is too short to contain 
all fields") + return errors.New("data is too short to contain all fields") } count := binary.NativeEndian.Uint64(data[h.countOffset : h.countOffset+8]) return encodeMapEntries(enc, count, func() (int, error) { @@ -983,13 +983,13 @@ func (b *goHMapBucketType) irType() ir.Type { return (*ir.GoHMapBucketType)(b) } func (*goHMapBucketType) encodeValueFields( *encodingContext, *jsontext.Encoder, []byte, ) error { - return fmt.Errorf("hmap bucket type is never directly encoded") + return errors.New("hmap bucket type is never directly encoded") } func (*goHMapBucketType) formatValueFields( *encodingContext, *bytes.Buffer, []byte, *formatLimits, ) error { - return fmt.Errorf("hmap bucket type is never directly formatted") + return errors.New("hmap bucket type is never directly formatted") } func (s *goSwissMapHeaderType) irType() ir.Type { return s.GoSwissMapHeaderType } @@ -1875,7 +1875,7 @@ func (s *goStringDataType) encodeValueFields( func (s *goStringDataType) formatValueFields( *encodingContext, *bytes.Buffer, []byte, *formatLimits, ) error { - return fmt.Errorf("string data is not formatted") + return errors.New("string data is not formatted") } func (c *goChannelType) irType() ir.Type { return (*ir.GoChannelType)(c) } @@ -2097,7 +2097,7 @@ func (u *unresolvedPointeeType) encodeValueFields( func (u *unresolvedPointeeType) formatValueFields( *encodingContext, *bytes.Buffer, []byte, *formatLimits, ) error { - return fmt.Errorf("depth limit reached") + return errors.New("depth limit reached") } func getFieldByName(fields []ir.Field, name string) (*ir.Field, error) { diff --git a/pkg/dyninst/dwarf/loclist/fix.go b/pkg/dyninst/dwarf/loclist/fix.go index e8b891c6e26179..f838bfa19d36b6 100644 --- a/pkg/dyninst/dwarf/loclist/fix.go +++ b/pkg/dyninst/dwarf/loclist/fix.go @@ -9,7 +9,7 @@ package loclist import ( - "fmt" + "errors" "github.com/DataDog/datadog-agent/pkg/dyninst/ir" ) @@ -120,7 +120,7 @@ func fixLoclists(loclists []ir.Location, expectedByteSize uint32) 
([]ir.Location } if !found { - return nil, fmt.Errorf("could not compact pieces") + return nil, errors.New("could not compact pieces") } // Remove and merge pieces at the found index diff --git a/pkg/dyninst/dwarf/loclist/locations.go b/pkg/dyninst/dwarf/loclist/locations.go index d1ce8af6c6042d..11d7814d7df8e9 100644 --- a/pkg/dyninst/dwarf/loclist/locations.go +++ b/pkg/dyninst/dwarf/loclist/locations.go @@ -9,6 +9,7 @@ package loclist import ( "debug/dwarf" + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/dyninst/ir" @@ -42,7 +43,7 @@ func ProcessLocations( return nil, err } if len(loclist.Default) > 0 { - return nil, fmt.Errorf("unexpected default location pieces") + return nil, errors.New("unexpected default location pieces") } locations = loclist.Locations diff --git a/pkg/dyninst/dwarf/loclist/parse.go b/pkg/dyninst/dwarf/loclist/parse.go index 8c3ed405d7b947..046acfa0c0fef2 100644 --- a/pkg/dyninst/dwarf/loclist/parse.go +++ b/pkg/dyninst/dwarf/loclist/parse.go @@ -11,6 +11,7 @@ package loclist import ( "bytes" "encoding/binary" + "errors" "fmt" "math" @@ -108,7 +109,7 @@ func ParseInstructions(data []byte, ptrSize uint8, totalByteSize uint32) ([]ir.P op = ir.Addr{Addr: offset} case opcode == dw_op_deref: - return nil, fmt.Errorf("unsupported DW_OP_deref") + return nil, errors.New("unsupported DW_OP_deref") case dw_const_op_lo <= opcode && opcode <= dw_const_op_hi: return nil, fmt.Errorf("unsupported DW_OP_const* opcode: 0x%x", opcode) diff --git a/pkg/dyninst/dwarf/loclist/reader.go b/pkg/dyninst/dwarf/loclist/reader.go index 6543b93ddd2a2a..c6a67b0de07b19 100644 --- a/pkg/dyninst/dwarf/loclist/reader.go +++ b/pkg/dyninst/dwarf/loclist/reader.go @@ -12,6 +12,7 @@ import ( "bytes" "debug/dwarf" "encoding/binary" + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/dyninst/dwarf/dwarfutil" @@ -93,11 +94,11 @@ func (r *Reader) Read(unit *dwarf.Entry, offset int64, typeByteSize uint32) (Loc loclist, err = readDwarf2(data, r.ptrSize, typeByteSize) } 
else { if r.debugAddr == nil { - return Loclist{}, fmt.Errorf("missing debug_addr section") + return Loclist{}, errors.New("missing debug_addr section") } addrBase, ok := unit.Val(dwarf.AttrAddrBase).(int64) if !ok { - return Loclist{}, fmt.Errorf("missing addr_base attribute") + return Loclist{}, errors.New("missing addr_base attribute") } if addrBase > int64(len(r.debugAddr)) { return Loclist{}, fmt.Errorf("addr base %d out of bounds for section length %d", addrBase, len(r.debugAddr)) diff --git a/pkg/dyninst/ebpfbench/main.go b/pkg/dyninst/ebpfbench/main.go index 62bf574721d7f2..5d466f263dae50 100644 --- a/pkg/dyninst/ebpfbench/main.go +++ b/pkg/dyninst/ebpfbench/main.go @@ -10,6 +10,7 @@ package main import ( "context" + "errors" "fmt" "os" "os/exec" @@ -123,7 +124,7 @@ func runBenchmark() error { textSection := obj.Section(".text") if textSection == nil { - return fmt.Errorf("no .text section found") + return errors.New("no .text section found") } var allAttached []link.Link for _, attachpoint := range program.Attachpoints { diff --git a/pkg/dyninst/end_to_end_test.go b/pkg/dyninst/end_to_end_test.go index 68df08e88e1a2a..55ca9f667ff25e 100644 --- a/pkg/dyninst/end_to_end_test.go +++ b/pkg/dyninst/end_to_end_test.go @@ -411,7 +411,7 @@ func createRemoteConfigPath(product data.Product, id string, data []byte) string func getRcTesterEnv(rcHost string, rcPort int, tmpDir string) []string { return []string{ - fmt.Sprintf("DD_AGENT_HOST=%s", rcHost), + "DD_AGENT_HOST=" + rcHost, fmt.Sprintf("DD_AGENT_PORT=%d", rcPort), "DD_DYNAMIC_INSTRUMENTATION_ENABLED=true", "DD_REMOTE_CONFIGURATION_ENABLED=true", @@ -492,7 +492,7 @@ func startSampleServiceWithDocker( require.NoError(t, tarFile.Close()) containerTag := strings.ReplaceAll(strings.ReplaceAll(cfg.tmpDir, "/", "_"), ":", "_") - containerName := fmt.Sprintf("dyninst-e2e:%s", containerTag) + containerName := "dyninst-e2e:" + containerTag // Build the docker image. 
dockerBuildCmd := exec.Command("docker", "image", "import", tarPath, containerName) out, err := dockerBuildCmd.CombinedOutput() diff --git a/pkg/dyninst/exprlang/exprlang.go b/pkg/dyninst/exprlang/exprlang.go index 52406545b3a745..b59087ab22eb54 100644 --- a/pkg/dyninst/exprlang/exprlang.go +++ b/pkg/dyninst/exprlang/exprlang.go @@ -11,6 +11,7 @@ package exprlang import ( "bytes" + "errors" "fmt" "sync" @@ -73,7 +74,7 @@ func (d *pooledDecoder) put() { // Parse parses a DSL JSON expression into a strongly-typed AST node. func Parse(dslJSON []byte) (Expr, error) { if len(dslJSON) == 0 { - return nil, fmt.Errorf("parse error: empty DSL expression") + return nil, errors.New("parse error: empty DSL expression") } pooled := getPooledDecoder(dslJSON) defer pooled.put() @@ -123,7 +124,7 @@ func Parse(dslJSON []byte) (Expr, error) { refValue := val.String() if refValue == "" { - return nil, fmt.Errorf("parse error: ref value cannot be empty") + return nil, errors.New("parse error: ref value cannot be empty") } if err := readClosingBrace(); err != nil { diff --git a/pkg/dyninst/gosym/symtab.go b/pkg/dyninst/gosym/symtab.go index 0eea3a43d6c99f..1c54f5c70b6949 100644 --- a/pkg/dyninst/gosym/symtab.go +++ b/pkg/dyninst/gosym/symtab.go @@ -14,6 +14,7 @@ package gosym import ( "bytes" "encoding/binary" + "errors" "fmt" "iter" "math" @@ -235,7 +236,7 @@ type lineTable struct { func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, error) { if len(data) < 8 { - return nil, fmt.Errorf("pclntab too short") + return nil, errors.New("pclntab too short") } magic := binary.LittleEndian.Uint32(data[0:4]) @@ -256,7 +257,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro // Check pad bytes if data[4] != 0 || data[5] != 0 { - return nil, fmt.Errorf("unexpected pclntab header bytes") + return nil, errors.New("unexpected pclntab header bytes") } quantum := uint32(data[6]) @@ -297,7 +298,7 @@ func parselineTable(data []byte, textRange, 
pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off3) >= len(data) { - return nil, fmt.Errorf("invalid funcnametab offset") + return nil, errors.New("invalid funcnametab offset") } funcnametab := [2]int{int(off3), len(data)} @@ -306,7 +307,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off4) >= len(data) { - return nil, fmt.Errorf("invalid cutab offset") + return nil, errors.New("invalid cutab offset") } cutab := [2]int{int(off4), len(data)} @@ -315,7 +316,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off5) >= len(data) { - return nil, fmt.Errorf("invalid filetab offset") + return nil, errors.New("invalid filetab offset") } filetab := [2]int{int(off5), len(data)} @@ -324,7 +325,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off6) >= len(data) { - return nil, fmt.Errorf("invalid pc_tab offset") + return nil, errors.New("invalid pc_tab offset") } pcTab := [2]int{int(off6), len(data)} @@ -333,13 +334,13 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off7) >= len(data) { - return nil, fmt.Errorf("invalid funcdata offset") + return nil, errors.New("invalid funcdata offset") } base := int(off7) fieldSize := 4 // For ver118 and later, functab fields are 4 bytes required := (int(nfunctab)*2 + 1) * fieldSize if len(data) < base+required { - return nil, fmt.Errorf("pclntab too short for functab data") + return nil, errors.New("pclntab too short for functab data") } functab := [2]int{base, base + required} funcdata := [2]int{base, len(data)} @@ -380,7 +381,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off2) >= len(data) { - return nil, fmt.Errorf("invalid funcnametab offset") + return nil, errors.New("invalid funcnametab offset") } funcnametab := 
[2]int{int(off2), len(data)} @@ -389,7 +390,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off3) >= len(data) { - return nil, fmt.Errorf("invalid cutab offset") + return nil, errors.New("invalid cutab offset") } cutab := [2]int{int(off3), len(data)} @@ -398,7 +399,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off4) >= len(data) { - return nil, fmt.Errorf("invalid filetab offset") + return nil, errors.New("invalid filetab offset") } filetab := [2]int{int(off4), len(data)} @@ -407,7 +408,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off5) >= len(data) { - return nil, fmt.Errorf("invalid pc_tab offset") + return nil, errors.New("invalid pc_tab offset") } pcTab := [2]int{int(off5), len(data)} @@ -416,14 +417,14 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro return nil, err } if int(off6) >= len(data) { - return nil, fmt.Errorf("invalid funcdata offset") + return nil, errors.New("invalid funcdata offset") } base := int(off6) fieldSize := functabFieldSize(ptrSize, version) functabSize := (int(nfunctab)*2 + 1) * fieldSize if len(data) < base+functabSize { - return nil, fmt.Errorf("pclntab too short for functab data") + return nil, errors.New("pclntab too short for functab data") } functab := [2]int{base, base + functabSize} funcdata := [2]int{base, len(data)} @@ -450,12 +451,12 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro var nfunctab uint32 if ptrSize == 8 { if len(data) < 8+8 { - return nil, fmt.Errorf("pclntab too short for nfunctab") + return nil, errors.New("pclntab too short for nfunctab") } nfunctab = uint32(binary.LittleEndian.Uint64(data[8 : 8+8])) } else { if len(data) < 8+4 { - return nil, fmt.Errorf("pclntab too short for nfunctab") + return nil, errors.New("pclntab too short for nfunctab") } 
nfunctab = binary.LittleEndian.Uint32(data[8 : 8+4]) } @@ -463,17 +464,17 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro functabOffset := 8 + ptrSize functabSize := (int(nfunctab)*2 + 1) * ptrSize if len(data) < functabOffset+functabSize { - return nil, fmt.Errorf("pclntab too short for functab") + return nil, errors.New("pclntab too short for functab") } functab := [2]int{functabOffset, functabOffset + functabSize} if len(data) < functab[1]+4 { - return nil, fmt.Errorf("pclntab too short for filetab offset") + return nil, errors.New("pclntab too short for filetab offset") } filetabOffset := binary.LittleEndian.Uint32(data[functab[1] : functab[1]+4]) if int(filetabOffset)+4 > len(data) { - return nil, fmt.Errorf("filetab offset out of bounds") + return nil, errors.New("filetab offset out of bounds") } nfiletab := binary.LittleEndian.Uint32(data[filetabOffset : filetabOffset+4]) @@ -500,7 +501,7 @@ func parselineTable(data []byte, textRange, pcRange [2]uint64) (*lineTable, erro }, nil default: - return nil, fmt.Errorf("unsupported pclntab version") + return nil, errors.New("unsupported pclntab version") } } @@ -579,7 +580,7 @@ func (lt *lineTable) funcInfo(i uint32) (funcInfo, error) { actualOffset := lt.funcdata[0] + int(funcOff) if actualOffset >= len(lt.data) { - return funcInfo{}, fmt.Errorf("function offset out of bounds") + return funcInfo{}, errors.New("function offset out of bounds") } return funcInfo{ @@ -1123,14 +1124,14 @@ func (ft *funcTab) count() uint32 { func (ft *funcTab) pc(i uint32) (uint64, error) { if i >= ft.nfunctab { - return 0, fmt.Errorf("function index out of range") + return 0, errors.New("function index out of range") } fieldSize := functabFieldSize(ft.ptrSize, ft.version) offset := ft.functab[0] + int(2*i)*fieldSize if offset+fieldSize > len(ft.data) { - return 0, fmt.Errorf("function table entry out of bounds") + return 0, errors.New("function table entry out of bounds") } var pc uint64 @@ -1153,14 
+1154,14 @@ func (ft *funcTab) pc(i uint32) (uint64, error) { func (ft *funcTab) funcOff(i uint32) (uint64, error) { if i >= ft.nfunctab { - return 0, fmt.Errorf("function index out of range") + return 0, errors.New("function index out of range") } fieldSize := functabFieldSize(ft.ptrSize, ft.version) offset := ft.functab[0] + int(2*i+1)*fieldSize if offset+fieldSize > len(ft.data) { - return 0, fmt.Errorf("function offset out of bounds") + return 0, errors.New("function offset out of bounds") } var funcOff uint64 diff --git a/pkg/dyninst/gotype/gotypecli/main.go b/pkg/dyninst/gotype/gotypecli/main.go index cd5dc55442faa4..2b1de79d455b9d 100644 --- a/pkg/dyninst/gotype/gotypecli/main.go +++ b/pkg/dyninst/gotype/gotypecli/main.go @@ -48,7 +48,7 @@ func run(binaryPath string, typelinks bool) (err error) { tl := mef.Section(".typelink") if tl == nil { - return fmt.Errorf("no .typelink section") + return errors.New("no .typelink section") } tlMap, err := mef.SectionData(tl) if err != nil { diff --git a/pkg/dyninst/gotype/table.go b/pkg/dyninst/gotype/table.go index 6d3eba667d88d1..1d368b6196b015 100644 --- a/pkg/dyninst/gotype/table.go +++ b/pkg/dyninst/gotype/table.go @@ -9,6 +9,7 @@ package gotype import ( "encoding/binary" + "errors" "fmt" "io" "iter" @@ -44,7 +45,7 @@ func NewTable(obj object.File) (*Table, error) { return s.Addr <= moduleData.Types && moduleData.Types < s.Addr+s.Size }) if idx == -1 { - return nil, fmt.Errorf("section containing types not found") + return nil, errors.New("section containing types not found") } const rodataName = ".rodata" rodata := sections[idx] diff --git a/pkg/dyninst/irgen/cli/analyze.go b/pkg/dyninst/irgen/cli/analyze.go index f848b19e1aa4fe..1ff8b3933485c1 100644 --- a/pkg/dyninst/irgen/cli/analyze.go +++ b/pkg/dyninst/irgen/cli/analyze.go @@ -74,7 +74,7 @@ func analyze(path string, method string) error { &rcjson.SnapshotProbe{ LogProbeCommon: rcjson.LogProbeCommon{ ProbeCommon: rcjson.ProbeCommon{ - ID: 
fmt.Sprintf("probe_%s", method), + ID: "probe_" + method, Where: &rcjson.Where{MethodName: method}, }, }, diff --git a/pkg/dyninst/irgen/irgen.go b/pkg/dyninst/irgen/irgen.go index 11832e666971a7..9da87ad757f266 100644 --- a/pkg/dyninst/irgen/irgen.go +++ b/pkg/dyninst/irgen/irgen.go @@ -215,10 +215,10 @@ func generateIR( } } if commonTypes.G == nil { - return nil, fmt.Errorf("runtime.g not found") + return nil, errors.New("runtime.g not found") } if commonTypes.M == nil { - return nil, fmt.Errorf("runtime.m not found") + return nil, errors.New("runtime.m not found") } // Materialize before creating probes so IR subprograms and vars exist. @@ -311,7 +311,7 @@ func generateIR( textSection := section{header: objFile.Section(".text")} if textSection.header == nil { - return nil, fmt.Errorf("failed to find text section") + return nil, errors.New("failed to find text section") } textSection.data, err = objFile.SectionData(textSection.header) if err != nil { @@ -680,7 +680,7 @@ func newTemplate(td ir.TemplateDefinition) *ir.Template { } case *exprlang.GetMemberExpr: case *exprlang.UnsupportedExpr: - msg := fmt.Sprintf("unsupported operation: %s", expr.Operation) + msg := "unsupported operation: " + expr.Operation addInvalid(segment, msg) continue default: @@ -1080,7 +1080,7 @@ func expandTypesWithBudgets( return fmt.Errorf("failed to get next entry: %w", err) } if entry == nil { - return fmt.Errorf("unexpected EOF while reading type") + return errors.New("unexpected EOF while reading type") } name, err := getAttr[string](entry, dwarf.AttrName) if err != nil { @@ -1595,7 +1595,7 @@ func processDwarf( } if v.goRuntimeInformation == (ir.GoModuledataInfo{}) { - return processedDwarf{}, fmt.Errorf("runtime.firstmoduledata not found") + return processedDwarf{}, errors.New("runtime.firstmoduledata not found") } return processedDwarf{ pendingSubprograms: append(v.subprograms, inlinedSubprograms...), @@ -1920,7 +1920,7 @@ func findStructSizeAndMemberOffset( return 0, 0, 
fmt.Errorf("expected struct type, got %s", entry.Tag) } if !entry.Children { - return 0, 0, fmt.Errorf("struct type has no children") + return 0, 0, errors.New("struct type has no children") } structSize, err := getAttr[int64](entry, dwarf.AttrByteSize) if err != nil { @@ -1936,7 +1936,7 @@ func findStructSizeAndMemberOffset( return 0, 0, fmt.Errorf("failed to get next child: %w", err) } if child == nil { - return 0, 0, fmt.Errorf("unexpected EOF while reading struct type") + return 0, 0, errors.New("unexpected EOF while reading struct type") } if child.Tag == 0 { break @@ -2412,7 +2412,7 @@ func iterConcreteSubprograms( ) } if len(currentSubprogram.ranges) == 0 { - return fmt.Errorf("no ranges for concrete subprogram entry") + return errors.New("no ranges for concrete subprogram entry") } slices.SortFunc(currentSubprogram.ranges, cmpRange) @@ -2760,7 +2760,7 @@ func completeGoStringType(tc *typeCatalog, st *ir.StructureType) error { strDataType := &ir.GoStringDataType{ TypeCommon: ir.TypeCommon{ ID: tc.idAlloc.next(), - Name: fmt.Sprintf("%s.str", st.Name), + Name: st.Name + ".str", DynamicSizeClass: ir.DynamicSizeString, ByteSize: 1, }, @@ -2796,7 +2796,7 @@ func completeGoSliceType(tc *typeCatalog, st *ir.StructureType) error { arrayDataType := &ir.GoSliceDataType{ TypeCommon: ir.TypeCommon{ ID: tc.idAlloc.next(), - Name: fmt.Sprintf("%s.array", st.Name), + Name: st.Name + ".array", DynamicSizeClass: ir.DynamicSizeSlice, ByteSize: elementType.GetByteSize(), }, @@ -3155,7 +3155,7 @@ func resolveExpression( if ptrType, ok := currentType.(*ir.PointerType); ok { // Check for void pointer. if _, isVoid := ptrType.Pointee.(*ir.VoidPointerType); isVoid { - return ir.Expression{}, fmt.Errorf( + return ir.Expression{}, errors.New( "cannot dereference void pointer", ) } @@ -3242,7 +3242,7 @@ func resolveExpression( if ptrType, ok := currentType.(*ir.PointerType); ok { // Check for void pointer. 
if _, isVoid := ptrType.Pointee.(*ir.VoidPointerType); isVoid { - return ir.Expression{}, fmt.Errorf( + return ir.Expression{}, errors.New( "cannot dereference void pointer", ) } diff --git a/pkg/dyninst/irgen/irgen_memory_use_test.go b/pkg/dyninst/irgen/irgen_memory_use_test.go index cfbd46006e59f4..1b4054606c79bc 100644 --- a/pkg/dyninst/irgen/irgen_memory_use_test.go +++ b/pkg/dyninst/irgen/irgen_memory_use_test.go @@ -9,7 +9,6 @@ package irgen_test import ( "bufio" - "fmt" "io" "os" "os/exec" @@ -44,7 +43,7 @@ func TestIrgenMemoryUse(t *testing.T) { stderrPath := filepath.Join(tmpDir, "irgen-memory-use-test.stderr") env := append( os.Environ(), - fmt.Sprintf("%s=true", internalEnv), + internalEnv+"=true", "GOMAXPROCS=1", "GODEBUG=gctrace=1", "--test.run=TestIrgenMemoryUseInternal", diff --git a/pkg/dyninst/irgen/type_catalog.go b/pkg/dyninst/irgen/type_catalog.go index 8d8fe2956bf6cc..78092e5c60051b 100644 --- a/pkg/dyninst/irgen/type_catalog.go +++ b/pkg/dyninst/irgen/type_catalog.go @@ -9,6 +9,7 @@ package irgen import ( "debug/dwarf" + "errors" "fmt" "math" "reflect" @@ -80,7 +81,7 @@ func (c *typeCatalog) addType(offset dwarf.Offset) (ret ir.Type, retErr error) { return t, nil } if pt != nil && ppt != pt { - return nil, fmt.Errorf("bug: multiple pointee placeholder types found") + return nil, errors.New("bug: multiple pointee placeholder types found") } pt = ppt } @@ -94,7 +95,7 @@ func (c *typeCatalog) addType(offset dwarf.Offset) (ret ir.Type, retErr error) { return nil, fmt.Errorf("failed to get next entry: %w", err) } if entry == nil { - return nil, fmt.Errorf("unexpected EOF while reading type") + return nil, errors.New("unexpected EOF while reading type") } if entry.Tag != dwarf.TagTypedef || entry.AttrField(dwAtGoKind) != nil { break @@ -104,7 +105,7 @@ func (c *typeCatalog) addType(offset dwarf.Offset) (ret ir.Type, retErr error) { return nil, fmt.Errorf("failed to get type for typedef: %w", err) } if numOffsets++; numOffsets > maxTypedefDepth { - 
return nil, fmt.Errorf("long typedef chain detected") + return nil, errors.New("long typedef chain detected") } offsets[numOffsets-1] = typeOffset } @@ -168,7 +169,7 @@ func (c *typeCatalog) buildType( var haveCount bool var count uint32 if !entry.Children { - return nil, fmt.Errorf("array type has no children") + return nil, errors.New("array type has no children") } arrayChildren: for { @@ -177,7 +178,7 @@ func (c *typeCatalog) buildType( return nil, fmt.Errorf("failed to get next child: %w", err) } if child == nil { - return nil, fmt.Errorf( + return nil, errors.New( "unexpected EOF while reading array type", ) } @@ -203,7 +204,7 @@ func (c *typeCatalog) buildType( if haveCount { break arrayChildren } - return nil, fmt.Errorf("unexpected end of array type") + return nil, errors.New("unexpected end of array type") } } @@ -240,7 +241,7 @@ func (c *typeCatalog) buildType( }, nil case dwarf.TagPointerType: if entry.Children { - return nil, fmt.Errorf("unexpected children for pointer type") + return nil, errors.New("unexpected children for pointer type") } if common.ByteSize == 0 { common.ByteSize = uint32(c.ptrSize) @@ -276,7 +277,7 @@ func (c *typeCatalog) buildType( return nil, err } if pointeeEntry == nil { - return nil, fmt.Errorf( + return nil, errors.New( "unexpected EOF while reading pointee type", ) } @@ -297,7 +298,7 @@ func (c *typeCatalog) buildType( return nil, err } if pointeeEntry == nil { - return nil, fmt.Errorf("unexpected EOF while reading pointee type") + return nil, errors.New("unexpected EOF while reading pointee type") } } else { haveUnderlyingType = true @@ -332,7 +333,7 @@ func (c *typeCatalog) buildType( case dwarf.TagStructType: if !entry.Children { - return nil, fmt.Errorf("structure type has no children") + return nil, errors.New("structure type has no children") } fields, err := collectMembers(childReader, c) if err != nil { @@ -467,7 +468,7 @@ func processInterfaceTypedef( return nil, err } if nextEntry == nil { - return nil, 
fmt.Errorf( + return nil, errors.New( "unexpected EOF while reading underlying type", ) } @@ -481,7 +482,7 @@ func processInterfaceTypedef( return "", 0, nil, err } if underlyingEntry.Tag == 0 { - return "", 0, nil, fmt.Errorf("unexpected end of underlying type") + return "", 0, nil, errors.New("unexpected end of underlying type") } } if underlyingEntry.Tag != dwarf.TagStructType { @@ -513,7 +514,7 @@ func processInterfaceTypedef( return "", 0, nil, fmt.Errorf("failed to get next child: %w", err) } if child == nil { - return "", 0, nil, fmt.Errorf( + return "", 0, nil, errors.New( "unexpected EOF while reading underlying type", ) } @@ -602,7 +603,7 @@ structChildren: return nil, fmt.Errorf("failed to get next child: %w", err) } if child == nil { - return nil, fmt.Errorf( + return nil, errors.New( "unexpected EOF while reading structure type", ) } diff --git a/pkg/dyninst/irgen/type_index_on_disk.go b/pkg/dyninst/irgen/type_index_on_disk.go index 45c045525f30a3..21864bb4d24f5e 100644 --- a/pkg/dyninst/irgen/type_index_on_disk.go +++ b/pkg/dyninst/irgen/type_index_on_disk.go @@ -73,7 +73,7 @@ type onDiskGoTypeToOffsetIndexBuilder struct { // AddType implements typeIndexBuilder. func (o *onDiskGoTypeToOffsetIndexBuilder) addType(typeID gotype.TypeID, dwarfOffset dwarf.Offset) error { if o.w == nil { - return fmt.Errorf("builder is closed") + return errors.New("builder is closed") } o.entryBuf = goTypeOffsetEntry{typeID: typeID, dwarfOffset: dwarfOffset} buf := unsafe.Slice((*uint8)(unsafe.Pointer(&o.entryBuf)), unsafe.Sizeof(o.entryBuf)) @@ -86,7 +86,7 @@ func (o *onDiskGoTypeToOffsetIndexBuilder) addType(typeID gotype.TypeID, dwarfOf // Build implements typeIndexBuilder. 
func (o *onDiskGoTypeToOffsetIndexBuilder) build() (_ goTypeToOffsetIndex, retErr error) { if o.w == nil { - return nil, fmt.Errorf("builder is closed") + return nil, errors.New("builder is closed") } defer func() { if retErr != nil { @@ -160,7 +160,7 @@ var _ methodToGoTypeIndexBuilder = (*onDiskMethodToGoTypeIndexBuilder)(nil) func (o *onDiskMethodToGoTypeIndexBuilder) addMethod(method gotype.Method, receiver gotype.TypeID) error { if o.w == nil { - return fmt.Errorf("builder is closed") + return errors.New("builder is closed") } o.entryBuf = [3]uint32{uint32(method.Name), uint32(method.Mtyp), uint32(receiver)} buf := unsafe.Slice((*uint8)(unsafe.Pointer(&o.entryBuf)), unsafe.Sizeof(o.entryBuf)) @@ -171,7 +171,7 @@ func (o *onDiskMethodToGoTypeIndexBuilder) addMethod(method gotype.Method, recei } func (o *onDiskMethodToGoTypeIndexBuilder) build() (_ methodToGoTypeIndex, retErr error) { if o.w == nil { - return nil, fmt.Errorf("builder is closed") + return nil, errors.New("builder is closed") } defer func() { o.w = nil }() if err := o.w.Flush(); err != nil { diff --git a/pkg/dyninst/irgen/visit.go b/pkg/dyninst/irgen/visit.go index 305ebd3f8dce1d..51d348d43e3f1c 100644 --- a/pkg/dyninst/irgen/visit.go +++ b/pkg/dyninst/irgen/visit.go @@ -9,6 +9,7 @@ package irgen import ( "debug/dwarf" + "errors" "fmt" ) @@ -73,7 +74,7 @@ func visitReader( ) } if child == nil { - return fmt.Errorf( + return errors.New( "visitReader: unexpected EOF while reading children", ) } diff --git a/pkg/dyninst/loader/loader.go b/pkg/dyninst/loader/loader.go index 3df3a43e1448cf..394a67098b3264 100644 --- a/pkg/dyninst/loader/loader.go +++ b/pkg/dyninst/loader/loader.go @@ -95,7 +95,7 @@ func (l *Loader) Load(program compiler.Program) (*Program, error) { ringbufMapSpec, ok := spec.Maps[ringbufMapName] if !ok { - return nil, fmt.Errorf("ringbuffer map not found in eBPF spec") + return nil, errors.New("ringbuffer map not found in eBPF spec") } ringbufMapSpec.MaxEntries = 
uint32(l.config.ringBufSize) @@ -112,7 +112,7 @@ func (l *Loader) Load(program compiler.Program) (*Program, error) { } bpfProgram, ok := collection.Programs["probe_run_with_cookie"] if !ok { - return nil, fmt.Errorf("probe_run_with_cookie program not found in collection") + return nil, errors.New("probe_run_with_cookie program not found in collection") } maps = nil @@ -287,7 +287,7 @@ func (l *Loader) init(opts ...Option) error { stripRelocations(l.ebpfSpec) ringbufMapSpec, ok := l.ebpfSpec.Maps[ringbufMapName] if !ok { - return fmt.Errorf("ringbuffer map not found in eBPF spec") + return errors.New("ringbuffer map not found in eBPF spec") } ringbufMapSpec.MaxEntries = uint32(l.config.ringBufSize) return nil @@ -441,7 +441,7 @@ func (l *Loader) loadData( mapSpec, ok := spec.Maps[throttlerStateMapName] if !ok { - return nil, fmt.Errorf("throttler_buf map not found in eBPF spec") + return nil, errors.New("throttler_buf map not found in eBPF spec") } mapSpec.MaxEntries = uint32(len(serialized.throttlerParams)) @@ -505,7 +505,7 @@ func setCommonConstants(spec *ebpf.CollectionSpec, serialized *serializedProgram m := serialized.commonTypes.M stack, ok := g.FieldByName("stack") if !ok { - return fmt.Errorf("stack field not found in runtime.g") + return errors.New("stack field not found in runtime.g") } stackStruct, ok := stack.Type.(*ir.StructureType) if !ok { diff --git a/pkg/dyninst/loader/serialize.go b/pkg/dyninst/loader/serialize.go index d147d46ccd464f..50eb9cbc39669f 100644 --- a/pkg/dyninst/loader/serialize.go +++ b/pkg/dyninst/loader/serialize.go @@ -11,6 +11,7 @@ package loader import ( "bytes" "cmp" + "errors" "fmt" "slices" "sort" @@ -110,7 +111,7 @@ func serializeProgram( var ok bool serialized.chasePointersEntrypoint, ok = metadata.FunctionLoc[compiler.ChasePointers{}] if !ok { - return nil, fmt.Errorf("serialized program is missing ChasePointers function") + return nil, errors.New("serialized program is missing ChasePointers function") } 
slices.SortFunc(program.Types, func(a, b ir.Type) int { diff --git a/pkg/dyninst/module/config.go b/pkg/dyninst/module/config.go index 69888a236e2c05..aa9ed071bdd60f 100644 --- a/pkg/dyninst/module/config.go +++ b/pkg/dyninst/module/config.go @@ -8,6 +8,7 @@ package module import ( + "errors" "fmt" "net" "net/url" @@ -149,7 +150,7 @@ const ( symdbUploaderPath = "/symdb/v1/input" ) -var errSchemeRequired = fmt.Errorf("scheme is required") +var errSchemeRequired = errors.New("scheme is required") // Parse the trace agent URL from the environment variables, falling back to the // default. diff --git a/pkg/dyninst/module/symbol.go b/pkg/dyninst/module/symbol.go index 46d10c448ee885..e5d88fa47d1d01 100644 --- a/pkg/dyninst/module/symbol.go +++ b/pkg/dyninst/module/symbol.go @@ -8,6 +8,7 @@ package module import ( + "errors" "fmt" "io" @@ -50,7 +51,7 @@ func newSymbolicator(executable actuator.Executable) (_ symbol.Symbolicator, c i c = symbolTable symbolicator := symbol.NewGoSymbolicator(&symbolTable.GoSymbolTable) if symbolicator == nil { - return nil, nil, fmt.Errorf("error creating go symbolicator") + return nil, nil, errors.New("error creating go symbolicator") } // TODO: make this configurable diff --git a/pkg/dyninst/object/decompression.go b/pkg/dyninst/object/decompression.go index 7cfccf68743df9..aff16640e85fb9 100644 --- a/pkg/dyninst/object/decompression.go +++ b/pkg/dyninst/object/decompression.go @@ -10,6 +10,7 @@ package object import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "strings" @@ -111,7 +112,7 @@ func (r compressedSectionMetadata) data( switch r.format { case compressionFormatUnknown: - return nil, fmt.Errorf("unknown compression format") + return nil, errors.New("unknown compression format") case compressionFormatNone: return mef.mmap(uint64(r.offset), uint64(r.uncompressedLength)) case compressionFormatZlib: @@ -234,7 +235,7 @@ func readCompressedFileRange( // // See 
https://github.com/golang/go/blob/d166a0b0/src/debug/elf/file.go#L557-L579 if s.Flags&elf_SHF_ALLOC != 0 { - return r, fmt.Errorf("SHF_COMPRESSED applies only to non-allocable sections") + return r, errors.New("SHF_COMPRESSED applies only to non-allocable sections") } sr := io.NewSectionReader(fileReaderAt, int64(s.Offset), int64(s.FileSize)) diff --git a/pkg/dyninst/object/disk_cache.go b/pkg/dyninst/object/disk_cache.go index e3c86d017a2f1d..a31d481d8342ee 100644 --- a/pkg/dyninst/object/disk_cache.go +++ b/pkg/dyninst/object/disk_cache.go @@ -57,7 +57,7 @@ type DiskCacheConfig struct { func (cfg *DiskCacheConfig) validate() error { if cfg.DirPath == "" { - return fmt.Errorf("dirPath must not be empty") + return errors.New("dirPath must not be empty") } if cfg.RequiredDiskSpacePercent < 0 || cfg.RequiredDiskSpacePercent > 100 { return fmt.Errorf( @@ -66,7 +66,7 @@ func (cfg *DiskCacheConfig) validate() error { ) } if cfg.MaxTotalBytes == 0 { - return fmt.Errorf("maxTotalBytes must not be zero") + return errors.New("maxTotalBytes must not be zero") } return nil } @@ -412,17 +412,17 @@ type DiskFile struct { // crashes; it remains accessible via its file descriptor until closed. func (c *DiskCache) NewFile(name string, maxSize, initialSize uint64) (_ *DiskFile, retErr error) { if name == "" { - return nil, fmt.Errorf("name must not be empty") + return nil, errors.New("name must not be empty") } if initialSize > maxSize { - return nil, fmt.Errorf("initialSize must be <= maxSize") + return nil, errors.New("initialSize must be <= maxSize") } // If the disk doesn't have enough space, we can't create the file. 
if err := c.checker.check(maxSize); err != nil { return nil, err } if base := path.Base(name); base != name { - return nil, fmt.Errorf("name must not contain path separators") + return nil, errors.New("name must not contain path separators") } f, err := os.CreateTemp(c.dirPath, fmt.Sprintf("%s.%d", name, os.Getpid())) if err != nil { @@ -463,7 +463,7 @@ func (c *DiskCache) NewFile(name string, maxSize, initialSize uint64) (_ *DiskFi // necessary. If the write would exceed the max size, it fails. func (df *DiskFile) Write(p []byte) (int, error) { if df == nil || df.f == nil || df.closed { - return 0, fmt.Errorf("write on closed DiskFile") + return 0, errors.New("write on closed DiskFile") } if len(p) == 0 { return 0, nil @@ -532,10 +532,10 @@ func (df *DiskFile) IntoMMap(flags int) (_ SectionData, retErr error) { } }() if df == nil || df.f == nil || df.closed { - return nil, fmt.Errorf("IntoMMap on closed DiskFile") + return nil, errors.New("IntoMMap on closed DiskFile") } if df.used == 0 { - return nil, fmt.Errorf("cannot mmap empty DiskFile") + return nil, errors.New("cannot mmap empty DiskFile") } if err := df.f.Sync(); err != nil { return nil, fmt.Errorf("failed to sync DiskFile: %w", err) diff --git a/pkg/dyninst/object/elf_file_with_dwarf.go b/pkg/dyninst/object/elf_file_with_dwarf.go index 733fa179ec1cfb..abf204fd263dc4 100644 --- a/pkg/dyninst/object/elf_file_with_dwarf.go +++ b/pkg/dyninst/object/elf_file_with_dwarf.go @@ -192,7 +192,7 @@ func (d *dwarfData) init(f File) (retErr error) { } info := d.debugSections.Info() if info == nil { - return fmt.Errorf("no .debug_info section found") + return errors.New("no .debug_info section found") } dwarfData, err := d.debugSections.loadDwarfData() if err != nil { diff --git a/pkg/dyninst/object/gomoduledata.go b/pkg/dyninst/object/gomoduledata.go index 257786bb63d3b9..3fe5b24483ef7f 100644 --- a/pkg/dyninst/object/gomoduledata.go +++ b/pkg/dyninst/object/gomoduledata.go @@ -97,7 +97,7 @@ func (m *GoDebugSections) 
Close() error { func (m *ModuleData) GoDebugSections(mef File) (*GoDebugSections, error) { pclntabSection := mef.Section(".gopclntab") if pclntabSection == nil { - return nil, fmt.Errorf("no pclntab section") + return nil, errors.New("no pclntab section") } pclntab, err := mef.SectionDataRange(pclntabSection, 0, pclntabSection.Size) @@ -109,7 +109,7 @@ func (m *ModuleData) GoDebugSections(mef File) (*GoDebugSections, error) { if m.GoFunc != 0 && m.GcData != 0 { rodataSection := mef.Section(".rodata") if rodataSection.Addr > m.GoFunc || m.GcData > rodataSection.Addr+rodataSection.Size { - return nil, fmt.Errorf("gofunc outside rodata section") + return nil, errors.New("gofunc outside rodata section") } offset := m.GoFunc - rodataSection.Addr size := m.GcData - m.GoFunc @@ -129,22 +129,22 @@ func (m *ModuleData) GoDebugSections(mef File) (*GoDebugSections, error) { func parseModuleData(obj SectionLoader) (*ModuleData, error) { pclntabSection := obj.Section(".gopclntab") if pclntabSection == nil { - return nil, fmt.Errorf("no pclntab section") + return nil, errors.New("no pclntab section") } noptrdataSection := obj.Section(".noptrdata") if noptrdataSection == nil { - return nil, fmt.Errorf("no noptrdata section") + return nil, errors.New("no noptrdata section") } rodataSection := obj.Section(".rodata") if rodataSection == nil { - return nil, fmt.Errorf("no rodata section") + return nil, errors.New("no rodata section") } textSection := obj.Section(".text") if textSection == nil { - return nil, fmt.Errorf("no text section") + return nil, errors.New("no text section") } noptrdataSectionData, err := obj.SectionDataRange(noptrdataSection, 0, noptrdataSection.Size) @@ -180,7 +180,7 @@ func parseModuleData(obj SectionLoader) (*ModuleData, error) { } } - return nil, fmt.Errorf("no valid moduledata found") + return nil, errors.New("no valid moduledata found") } func tryParseModuleDataAt( @@ -193,7 +193,7 @@ func tryParseModuleDataAt( // Parse types range typesStart := offset + 
ModuledataTypesOffset if typesStart+16 > len(noptrdataData) { - return nil, fmt.Errorf("types data out of bounds") + return nil, errors.New("types data out of bounds") } types := binary.LittleEndian.Uint64(noptrdataData[typesStart:]) etypes := binary.LittleEndian.Uint64(noptrdataData[typesStart+8:]) @@ -201,29 +201,29 @@ func tryParseModuleDataAt( // Parse text range textStart := offset + ModuledataTextOffset if textStart+16 > len(noptrdataData) { - return nil, fmt.Errorf("text data out of bounds") + return nil, errors.New("text data out of bounds") } text := binary.LittleEndian.Uint64(noptrdataData[textStart:]) etext := binary.LittleEndian.Uint64(noptrdataData[textStart+8:]) // Validate ranges if types > etypes || types < rodataRange[0] || etypes > rodataRange[1] { - return nil, fmt.Errorf("invalid types range") + return nil, errors.New("invalid types range") } if text > etext || text < textRange[0] || etext > textRange[1] { - return nil, fmt.Errorf("invalid text range") + return nil, errors.New("invalid text range") } // Parse textsect map textsectMapOffset := offset + ModuledataTextsectMapOffset if textsectMapOffset+16 > len(noptrdataData) { - return nil, fmt.Errorf("textsect map data out of bounds") + return nil, errors.New("textsect map data out of bounds") } textsectMapPtr := binary.LittleEndian.Uint64(noptrdataData[textsectMapOffset:]) textsectMapLen := binary.LittleEndian.Uint64(noptrdataData[textsectMapOffset+8:]) if textsectMapPtr < rodataSection.Addr { - return nil, fmt.Errorf("textsect map pointer out of range") + return nil, errors.New("textsect map pointer out of range") } textsectSize := uint64(unsafe.Sizeof(TextSect{})) @@ -231,7 +231,7 @@ func tryParseModuleDataAt( textsectMapDataLen := int(textsectMapLen * textsectSize) if textsectMapDataOffset < 0 || textsectMapDataOffset+textsectMapDataLen > len(rodataData) { - return nil, fmt.Errorf("textsect map data out of bounds") + return nil, errors.New("textsect map data out of bounds") } textsectMapData 
:= rodataData[textsectMapDataOffset : textsectMapDataOffset+textsectMapDataLen] @@ -250,7 +250,7 @@ func tryParseModuleDataAt( // Parse BSS range bssOffset := offset + ModuledataBssOffset if bssOffset+16 > len(noptrdataData) { - return nil, fmt.Errorf("bss data out of bounds") + return nil, errors.New("bss data out of bounds") } bss := binary.LittleEndian.Uint64(noptrdataData[bssOffset:]) ebss := binary.LittleEndian.Uint64(noptrdataData[bssOffset+8:]) @@ -258,21 +258,21 @@ func tryParseModuleDataAt( // Parse gofunc offset gofuncOffset := offset + ModuledataGofuncOffset if gofuncOffset+8 > len(noptrdataData) { - return nil, fmt.Errorf("gofunc data out of bounds") + return nil, errors.New("gofunc data out of bounds") } gofunc := binary.LittleEndian.Uint64(noptrdataData[gofuncOffset:]) // Parse gcdata offset gcdataOffset := offset + ModuledataGcdataOffset if gcdataOffset+8 > len(noptrdataData) { - return nil, fmt.Errorf("gcdata data out of bounds") + return nil, errors.New("gcdata data out of bounds") } gcdata := binary.LittleEndian.Uint64(noptrdataData[gcdataOffset:]) // Parse min/max PC minPCOffset := offset + ModuledataMinPCOffset if minPCOffset+16 > len(noptrdataData) { - return nil, fmt.Errorf("minPC data out of bounds") + return nil, errors.New("minPC data out of bounds") } minPC := binary.LittleEndian.Uint64(noptrdataData[minPCOffset:]) maxPC := binary.LittleEndian.Uint64(noptrdataData[minPCOffset+8:]) diff --git a/pkg/dyninst/object/goversion.go b/pkg/dyninst/object/goversion.go index 4998696e4a6730..e4edc408550abf 100644 --- a/pkg/dyninst/object/goversion.go +++ b/pkg/dyninst/object/goversion.go @@ -9,6 +9,7 @@ package object import ( "encoding/binary" + "errors" "fmt" "regexp" "strconv" @@ -46,7 +47,7 @@ func ReadGoVersion(mef File) (*GoVersion, error) { } if buildVersionSym == nil { - return nil, fmt.Errorf("runtime.buildVersion not found") + return nil, errors.New("runtime.buildVersion not found") } // Find the section containing the symbol @@ -59,7 +60,7 
@@ func ReadGoVersion(mef File) (*GoVersion, error) { } if section == nil { - return nil, fmt.Errorf("section containing runtime.buildVersion not found") + return nil, errors.New("section containing runtime.buildVersion not found") } // Read the string @@ -85,7 +86,7 @@ func readString(mef SectionLoader, section *safeelf.SectionHeader, address, size offset := address - section.Addr if offset+size > uint64(len(msData)) { - return "", fmt.Errorf("string data out of bounds") + return "", errors.New("string data out of bounds") } // Handle string header based on size @@ -93,14 +94,14 @@ func readString(mef SectionLoader, section *safeelf.SectionHeader, address, size if size == 8 { // 32-bit pointers if offset+8 > uint64(len(msData)) { - return "", fmt.Errorf("not enough data for 32-bit string header") + return "", errors.New("not enough data for 32-bit string header") } dataAddr = uint64(binary.LittleEndian.Uint32(msData[offset:])) dataSize = uint64(binary.LittleEndian.Uint32(msData[offset+4:])) } else if size == 16 { // 64-bit pointers if offset+16 > uint64(len(msData)) { - return "", fmt.Errorf("not enough data for 64-bit string header") + return "", errors.New("not enough data for 64-bit string header") } dataAddr = binary.LittleEndian.Uint64(msData[offset:]) dataSize = binary.LittleEndian.Uint64(msData[offset+8:]) @@ -118,7 +119,7 @@ func readString(mef SectionLoader, section *safeelf.SectionHeader, address, size } if dataSection == nil { - return "", fmt.Errorf("failed to find data section") + return "", errors.New("failed to find data section") } mds, err := mef.SectionData(dataSection) @@ -130,7 +131,7 @@ func readString(mef SectionLoader, section *safeelf.SectionHeader, address, size dataOffset := dataAddr - dataSection.Addr if dataOffset+dataSize > uint64(len(mdsData)) { - return "", fmt.Errorf("string data out of bounds in data section") + return "", errors.New("string data out of bounds in data section") } return string(mdsData[dataOffset : 
dataOffset+dataSize]), nil diff --git a/pkg/dyninst/object/mmapping_elf_file.go b/pkg/dyninst/object/mmapping_elf_file.go index 437f6284593bbf..e2521cc1ab93a7 100644 --- a/pkg/dyninst/object/mmapping_elf_file.go +++ b/pkg/dyninst/object/mmapping_elf_file.go @@ -63,7 +63,7 @@ func (m *MMappingElfFile) SectionDataRange( s *safeelf.SectionHeader, offset, length uint64, ) (SectionData, error) { if s.Flags&safeelf.SHF_COMPRESSED != 0 { - return nil, fmt.Errorf("mmapping compressed sections is not supported") + return nil, errors.New("mmapping compressed sections is not supported") } if offset+length > s.Size { return nil, fmt.Errorf("out of section range: %d+%d > %d", offset, length, s.Size) diff --git a/pkg/dyninst/procsubscribe/procscan/list_pids_test.go b/pkg/dyninst/procsubscribe/procscan/list_pids_test.go index 1bd161b8f0ecc3..b67254db2983b8 100644 --- a/pkg/dyninst/procsubscribe/procscan/list_pids_test.go +++ b/pkg/dyninst/procsubscribe/procscan/list_pids_test.go @@ -14,6 +14,7 @@ import ( "os" "path/filepath" "slices" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -75,7 +76,7 @@ func TestListPids(t *testing.T) { dir := t.TempDir() const pageSize = 3 for i := range pageSize * 5 { - p := fmt.Sprintf("%d", i+1) + p := strconv.Itoa(i + 1) require.NoError(t, os.Mkdir(filepath.Join(dir, p), 0o755)) } seq := listPidsChunks(dir, pageSize) diff --git a/pkg/dyninst/procsubscribe/procscan/proc_stat_reader.go b/pkg/dyninst/procsubscribe/procscan/proc_stat_reader.go index e23227079abdd2..711a45ca2d3366 100644 --- a/pkg/dyninst/procsubscribe/procscan/proc_stat_reader.go +++ b/pkg/dyninst/procsubscribe/procscan/proc_stat_reader.go @@ -90,12 +90,12 @@ func startTimeTicksFromProcStat( buf = buf[len(pidBytes):] commStart := bytes.IndexByte(buf, '(') if commStart == -1 { - return 0, fmt.Errorf("comm not found in stat file") + return 0, errors.New("comm not found in stat file") } buf = buf[commStart+1:] commEnd := bytes.LastIndexByte(buf, ')') if commEnd == -1 { - 
return 0, fmt.Errorf("comm not found in stat file") + return 0, errors.New("comm not found in stat file") } buf = buf[commEnd+1:] fieldIdx := 2 // we've read the pid and comm @@ -108,7 +108,7 @@ func startTimeTicksFromProcStat( } } if len(fieldData) == 0 { - return 0, fmt.Errorf("starttime not found in stat file") + return 0, errors.New("starttime not found in stat file") } starttime := unsafe.String(unsafe.SliceData(fieldData), len(fieldData)) diff --git a/pkg/dyninst/procsubscribe/procscan/scanner_test.go b/pkg/dyninst/procsubscribe/procscan/scanner_test.go index 172dcd26966112..30f9af911e1924 100644 --- a/pkg/dyninst/procsubscribe/procscan/scanner_test.go +++ b/pkg/dyninst/procsubscribe/procscan/scanner_test.go @@ -10,6 +10,7 @@ package procscan import ( "bufio" "bytes" + "errors" "fmt" "iter" "os" @@ -290,7 +291,7 @@ type scanCommand struct{} func (c *scanCommand) execute(_ *testing.T, ts *scannerTestState) error { if !ts.initialized { - return fmt.Errorf( + return errors.New( "scanner not initialized: use !initialize command first", ) } @@ -463,12 +464,12 @@ func parseCommandsFromAST( file *ast.File, ) ([]command, []ast.Node, error) { if len(file.Docs) == 0 { - return nil, nil, fmt.Errorf("no documents in file") + return nil, nil, errors.New("no documents in file") } doc := file.Docs[0] if doc.Body == nil { - return nil, nil, fmt.Errorf("empty document") + return nil, nil, errors.New("empty document") } // The body should be a sequence. 
diff --git a/pkg/dyninst/symdb/cli/main.go b/pkg/dyninst/symdb/cli/main.go index 9d77c9ae0cbdef..2d23bc47e6e426 100644 --- a/pkg/dyninst/symdb/cli/main.go +++ b/pkg/dyninst/symdb/cli/main.go @@ -107,7 +107,7 @@ func run() (retErr error) { if *imageName == "" { // No image specified: treat binaryPathFlag as a local file if *binaryPathFlag == "" { - return fmt.Errorf("-binary-path is required when -image is not specified") + return errors.New("-binary-path is required when -image is not specified") } info, err := os.Stat(*binaryPathFlag) if err != nil { @@ -157,7 +157,7 @@ func run() (retErr error) { *silent = true if *uploadURL != "" && *uploadSite != "" { - return fmt.Errorf("only one of -upload-url or -upload-side must be specified") + return errors.New("only one of -upload-url or -upload-site must be specified") } if *uploadSite == "" { *uploadSite = "datad0g.com" @@ -167,7 +167,7 @@ func run() (retErr error) { } if *uploadAPIKey == "" { - return fmt.Errorf("-api-key must be specified when -upload is used") + return errors.New("-api-key must be specified when -upload is used") } var err error @@ -177,7 +177,7 @@ func run() (retErr error) { } if *uploadService == "" || *uploadVersion == "" { - return fmt.Errorf("when --upload is specified, --service and --version must also be specified") + return errors.New("when --upload is specified, --service and --version must also be specified") } } diff --git a/pkg/dyninst/symdb/symdb.go b/pkg/dyninst/symdb/symdb.go index e2ca7742cabab4..295f5f558403e6 100644 --- a/pkg/dyninst/symdb/symdb.go +++ b/pkg/dyninst/symdb/symdb.go @@ -17,6 +17,7 @@ import ( "math" "slices" "sort" + "strconv" "strings" "time" @@ -308,7 +309,7 @@ func (v Variable) Serialize(w StringWriter, indent string) { w.WriteString(": ") w.WriteString(v.TypeName) w.WriteString(" (declared at line ") - w.WriteString(fmt.Sprintf("%d", v.DeclLine)) + w.WriteString(strconv.Itoa(v.DeclLine)) w.WriteString(", available: ") for i, r := range v.AvailableLineRanges { if
i > 0 { diff --git a/pkg/dyninst/testprogs/test_progs.go b/pkg/dyninst/testprogs/test_progs.go index d90ce6e665c8d7..c6af6a7909f820 100644 --- a/pkg/dyninst/testprogs/test_progs.go +++ b/pkg/dyninst/testprogs/test_progs.go @@ -324,13 +324,13 @@ func (m *Config) Validate() error { switch m.GOARCH { case Amd64, Arm64: case "": - return fmt.Errorf("GOARCH is required") + return errors.New("GOARCH is required") default: return fmt.Errorf("GOARCH is invalid: %q", m.GOARCH) } if m.GOTOOLCHAIN == "" { - return fmt.Errorf("GOTOOLCHAIN is required") + return errors.New("GOTOOLCHAIN is required") } if !goVersionRegex.MatchString(m.GOTOOLCHAIN) { return fmt.Errorf("GOTOOLCHAIN is invalid: %q", m.GOTOOLCHAIN) diff --git a/pkg/dyninst/uploader/batcher_snapshot_test.go b/pkg/dyninst/uploader/batcher_snapshot_test.go index 668c0ae7f5e048..f7bd85277becbd 100644 --- a/pkg/dyninst/uploader/batcher_snapshot_test.go +++ b/pkg/dyninst/uploader/batcher_snapshot_test.go @@ -11,6 +11,7 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io/fs" "os" @@ -321,7 +322,7 @@ func (y *yamlBatchEvent) UnmarshalYAML(node *yaml.Node) error { } var err error if !v.Success { - err = fmt.Errorf("failed") + err = errors.New("failed") } y.event = batchOutcomeEvent{id: batchID(v.ID), err: err} y.advance = 0 @@ -356,9 +357,9 @@ func (se *snapshotEffects) sendBatch(id batchID, batch []json.RawMessage) { } n := &yaml.Node{Tag: "!send-batch", Kind: yaml.MappingNode, Style: yaml.FlowStyle} n.Content = []*yaml.Node{ - {Kind: yaml.ScalarNode, Value: "id"}, {Kind: yaml.ScalarNode, Value: fmt.Sprintf("%d", id)}, - {Kind: yaml.ScalarNode, Value: "items"}, {Kind: yaml.ScalarNode, Value: fmt.Sprintf("%d", len(batch))}, - {Kind: yaml.ScalarNode, Value: "bytes"}, {Kind: yaml.ScalarNode, Value: fmt.Sprintf("%d", batchSize(batch))}, + {Kind: yaml.ScalarNode, Value: "id"}, {Kind: yaml.ScalarNode, Value: strconv.FormatUint(uint64(id), 10)}, + {Kind: yaml.ScalarNode, Value: "items"}, {Kind: yaml.ScalarNode, Value: 
strconv.Itoa(len(batch))}, + {Kind: yaml.ScalarNode, Value: "bytes"}, {Kind: yaml.ScalarNode, Value: strconv.Itoa(batchSize(batch))}, } se.nodes = append(se.nodes, n) } diff --git a/pkg/dyninst/uploader/batcher_state.go b/pkg/dyninst/uploader/batcher_state.go index 5a7303c5dc5f76..d016c7f16b810a 100644 --- a/pkg/dyninst/uploader/batcher_state.go +++ b/pkg/dyninst/uploader/batcher_state.go @@ -9,6 +9,7 @@ package uploader import ( "encoding/json" + "errors" "fmt" "time" @@ -89,7 +90,7 @@ func (s *batcherState) handleEnqueueEvent(data json.RawMessage, now time.Time, e // modified in this case -- but it does imply an invariant violation. func (s *batcherState) handleTimerFiredEvent(eff effects) error { if !s.timerSet { - return fmt.Errorf("timer fired event received but timer is not set") + return errors.New("timer fired event received but timer is not set") } s.flush(eff) return nil diff --git a/pkg/dyninst/uprobe/attach.go b/pkg/dyninst/uprobe/attach.go index 16a76a48cc37f9..8caa8d36939b54 100644 --- a/pkg/dyninst/uprobe/attach.go +++ b/pkg/dyninst/uprobe/attach.go @@ -79,7 +79,7 @@ func Attach( textSection := elfFile.Section(".text") if textSection == nil { - return nil, fmt.Errorf("text section not found") + return nil, errors.New("text section not found") } // As close to injection as possible, check that executable that we analyzed diff --git a/pkg/ebpf/btf.go b/pkg/ebpf/btf.go index bf300cc7cd9d01..0f179dc37a2400 100644 --- a/pkg/ebpf/btf.go +++ b/pkg/ebpf/btf.go @@ -109,10 +109,10 @@ type BTFResultMetadata struct { func (d BTFResultMetadata) String() string { res := fmt.Sprintf("numLoadAttempts: %d\nloaderUsed: %s", d.numLoadAttempts, d.loaderUsed) if d.filepathUsed != "" { - res += fmt.Sprintf("\nfilepathUsed: %s", d.filepathUsed) + res += "\nfilepathUsed: " + d.filepathUsed } if d.tarballUsed != "" { - res += fmt.Sprintf("\ntarballUsed: %s", d.tarballUsed) + res += "\ntarballUsed: " + d.tarballUsed } return res } diff --git a/pkg/ebpf/btf_test.go 
b/pkg/ebpf/btf_test.go index 1af059aca02abb..c905fb4104c68e 100644 --- a/pkg/ebpf/btf_test.go +++ b/pkg/ebpf/btf_test.go @@ -8,7 +8,7 @@ package ebpf import ( - "fmt" + "errors" "os" "path/filepath" "runtime" @@ -78,7 +78,7 @@ func TestBTFTelemetry(t *testing.T) { func curDir() (string, error) { _, file, _, ok := runtime.Caller(0) if !ok { - return "", fmt.Errorf("unable to get current file build path") + return "", errors.New("unable to get current file build path") } buildDir := filepath.Dir(file) diff --git a/pkg/ebpf/bytecode/permissions_windows.go b/pkg/ebpf/bytecode/permissions_windows.go index 2833e82a9e082f..7f83bdbe4e4b96 100644 --- a/pkg/ebpf/bytecode/permissions_windows.go +++ b/pkg/ebpf/bytecode/permissions_windows.go @@ -8,10 +8,10 @@ package bytecode import ( - "fmt" + "errors" ) // VerifyAssetPermissions is for verifying the permissions of bpf programs func VerifyAssetPermissions(_ string) error { - return fmt.Errorf("verification of bpf assets is not supported on windows") + return errors.New("verification of bpf assets is not supported on windows") } diff --git a/pkg/ebpf/bytecode/runtime/helpers.go b/pkg/ebpf/bytecode/runtime/helpers.go index 067c02bdd87f49..2b02be72bb118b 100644 --- a/pkg/ebpf/bytecode/runtime/helpers.go +++ b/pkg/ebpf/bytecode/runtime/helpers.go @@ -10,6 +10,7 @@ package runtime import ( "bufio" "bytes" + "errors" "fmt" "io" "os" @@ -94,7 +95,7 @@ func getAvailableHelpers(kernelHeaders []string) ([]string, error) { lastLine = scanner.Bytes() } if len(lastLine) == 0 { - return nil, fmt.Errorf("empty output") + return nil, errors.New("empty output") } funcs := strings.Split(string(lastLine), ", ") diff --git a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go index e0123f2129bee1..f8850be99866cc 100644 --- a/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go +++ b/pkg/ebpf/bytecode/runtime/runtime_compilation_helpers.go @@ -83,7 +83,7 @@ func 
compileToObjectFile(inFile, outputDir, filename, inHash string, additionalF return nil, compilationErr, fmt.Errorf("error getting helper availability: %w", err) } defer os.Remove(helperPath) - flags = append(flags, fmt.Sprintf("-include%s", helperPath)) + flags = append(flags, "-include"+helperPath) } log.Debugf("compiling runtime version of %s to %s", filename, outputFile) @@ -113,7 +113,7 @@ func compileToObjectFile(inFile, outputDir, filename, inHash string, additionalF func computeFlagsAndHash(additionalFlags []string) ([]string, string) { flags := make([]string, 0, len(defaultFlags)+len(additionalFlags)+1) - flags = append(flags, fmt.Sprintf("-D__TARGET_ARCH_%s", kernel.Arch())) + flags = append(flags, "-D__TARGET_ARCH_"+kernel.Arch()) flags = append(flags, defaultFlags...) flags = append(flags, additionalFlags...) @@ -121,7 +121,7 @@ func computeFlagsAndHash(additionalFlags []string) ([]string, string) { for _, f := range flags { hasher.Write([]byte(f)) } - flagHash := fmt.Sprintf("%x", hasher.Sum(nil)) + flagHash := hex.EncodeToString(hasher.Sum(nil)) return flags, flagHash } diff --git a/pkg/ebpf/co_re.go b/pkg/ebpf/co_re.go index 6be4557513d946..be26b3a511b2b7 100644 --- a/pkg/ebpf/co_re.go +++ b/pkg/ebpf/co_re.go @@ -65,7 +65,7 @@ func (c *coreAssetLoader) loadCOREAsset(filename string, startFn func(bytecode.A return fmt.Errorf("BTF load: %w", err) } if ret == nil { - return fmt.Errorf("no BTF data") + return errors.New("no BTF data") } buf, err := bytecode.GetReader(c.coreDir, filename) diff --git a/pkg/ebpf/compiler/compiler.go b/pkg/ebpf/compiler/compiler.go index 1553ef5d42ac44..55a208deecaf33 100644 --- a/pkg/ebpf/compiler/compiler.go +++ b/pkg/ebpf/compiler/compiler.go @@ -52,13 +52,13 @@ func kernelHeaderPaths(headerDirs []string) []string { var paths []string for _, d := range headerDirs { paths = append(paths, - fmt.Sprintf("%s/arch/%s/include", d, arch), - fmt.Sprintf("%s/arch/%s/include/generated", d, arch), - fmt.Sprintf("%s/include", d), - 
fmt.Sprintf("%s/arch/%s/include/uapi", d, arch), - fmt.Sprintf("%s/arch/%s/include/generated/uapi", d, arch), - fmt.Sprintf("%s/include/uapi", d), - fmt.Sprintf("%s/include/generated/uapi", d), + d+"/arch/"+arch+"/include", + d+"/arch/"+arch+"/include/generated", + d+"/include", + d+"/arch/"+arch+"/include/uapi", + d+"/arch/"+arch+"/include/generated/uapi", + d+"/include/uapi", + d+"/include/generated/uapi", ) } return paths @@ -71,11 +71,11 @@ func CompileToObjectFile(inFile, outputFile string, cflags []string, headerDirs return err } defer os.RemoveAll(tmpIncludeDir) - cflags = append(cflags, fmt.Sprintf("-isystem%s", tmpIncludeDir)) + cflags = append(cflags, "-isystem"+tmpIncludeDir) kps := kernelHeaderPaths(headerDirs) for _, p := range kps { - cflags = append(cflags, fmt.Sprintf("-isystem%s", p)) + cflags = append(cflags, "-isystem"+p) } cflags = append(cflags, "-c", "-x", "c", "-o", "-", inFile) @@ -174,11 +174,11 @@ func Preprocess(in io.Reader, out io.Writer, cflags []string, headerDirs []strin return err } defer os.RemoveAll(tmpIncludeDir) - cflags = append(cflags, fmt.Sprintf("-isystem%s", tmpIncludeDir)) + cflags = append(cflags, "-isystem"+tmpIncludeDir) kps := kernelHeaderPaths(headerDirs) for _, p := range kps { - cflags = append(cflags, fmt.Sprintf("-isystem%s", p)) + cflags = append(cflags, "-isystem"+p) } cflags = append(cflags, "-E", "-x", "c", "-o", "-", "-") diff --git a/pkg/ebpf/probes.go b/pkg/ebpf/probes.go index 5cc3ea53b53247..b890c43e07bad5 100644 --- a/pkg/ebpf/probes.go +++ b/pkg/ebpf/probes.go @@ -8,7 +8,7 @@ package ebpf import ( - "fmt" + "errors" "runtime" "strings" @@ -34,24 +34,24 @@ func (c *Config) ChooseSyscallProbeExit(tracepoint string, fallback string) (str func (c *Config) ChooseSyscallProbe(tracepoint string, indirectProbe string, fallback string) (string, error) { tparts := strings.Split(tracepoint, "/") if len(tparts) != 3 || tparts[0] != "tracepoint" || tparts[1] != "syscalls" { - return "", fmt.Errorf("invalid 
tracepoint name") + return "", errors.New("invalid tracepoint name") } category := tparts[1] tpName := tparts[2] fparts := strings.Split(fallback, "/") if len(fparts) != 2 { - return "", fmt.Errorf("invalid fallback probe name") + return "", errors.New("invalid fallback probe name") } syscall := strings.TrimPrefix(fparts[1], "sys_") if indirectProbe != "" { xparts := strings.Split(indirectProbe, "/") if len(xparts) < 2 { - return "", fmt.Errorf("invalid indirect probe name") + return "", errors.New("invalid indirect probe name") } if strings.TrimPrefix(xparts[1], "sys_") != syscall { - return "", fmt.Errorf("indirect and fallback probe syscalls do not match") + return "", errors.New("indirect and fallback probe syscalls do not match") } } diff --git a/pkg/ebpf/rc_btf_test.go b/pkg/ebpf/rc_btf_test.go index 80bc9fc539c68b..826bff2518bea5 100644 --- a/pkg/ebpf/rc_btf_test.go +++ b/pkg/ebpf/rc_btf_test.go @@ -10,6 +10,7 @@ package ebpf import ( "context" "crypto/sha256" + "encoding/hex" "errors" "fmt" "io" @@ -118,7 +119,7 @@ func hashFile(path string) (string, error) { if _, err := io.Copy(h, f); err != nil { return "", fmt.Errorf("error hashing input file: %w", err) } - return fmt.Sprintf("%x", h.Sum(nil)), nil + return hex.EncodeToString(h.Sum(nil)), nil } func getCatalog(t *testing.T, shasum string) string { diff --git a/pkg/ebpf/telemetry/debugfs.go b/pkg/ebpf/telemetry/debugfs.go index fab3c8b0950bb1..5ea9903a44218a 100644 --- a/pkg/ebpf/telemetry/debugfs.go +++ b/pkg/ebpf/telemetry/debugfs.go @@ -74,8 +74,8 @@ func getProbeStats(pid int, profile string) map[string]uint64 { event = parts[1] } event = strings.ToLower(event) - res[fmt.Sprintf("%s_hits", event)] = st.Hits - res[fmt.Sprintf("%s_misses", event)] = st.Misses + res[event+"_hits"] = st.Hits + res[event+"_misses"] = st.Misses } return res diff --git a/pkg/ebpf/uprobes/attacher_test.go b/pkg/ebpf/uprobes/attacher_test.go index 5ab349ecc29c26..0b0df6de6ad5a5 100644 --- a/pkg/ebpf/uprobes/attacher_test.go 
+++ b/pkg/ebpf/uprobes/attacher_test.go @@ -106,7 +106,7 @@ func TestAttachPidReadsSharedLibraries(t *testing.T) { exe := "foobar" pid := uint32(1) libname := "/target/libssl.so" - maps := fmt.Sprintf("08048000-08049000 r-xp 00000000 03:00 8312 %s", libname) + maps := "08048000-08049000 r-xp 00000000 03:00 8312 " + libname procRoot := kernel.CreateFakeProcFS(t, []kernel.FakeProcFSEntry{{Pid: pid, Cmdline: exe, Command: exe, Exe: exe, Maps: maps}}) config := AttacherConfig{ ProcRoot: procRoot, diff --git a/pkg/ebpf/uprobes/inspector_test.go b/pkg/ebpf/uprobes/inspector_test.go index 2b79ed8971f952..1f802ad02002a5 100644 --- a/pkg/ebpf/uprobes/inspector_test.go +++ b/pkg/ebpf/uprobes/inspector_test.go @@ -8,7 +8,6 @@ package uprobes import ( - "fmt" "path/filepath" "runtime" "testing" @@ -25,7 +24,7 @@ func TestNativeBinarySymbolRetrieval(t *testing.T) { require.NoError(t, err) libmmap := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") - lib := filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + lib := filepath.Join(libmmap, "libssl.so."+runtime.GOARCH) fpath := utils.FilePath{HostPath: lib} setID := 0 diff --git a/pkg/ebpf/uprobes/testutil.go b/pkg/ebpf/uprobes/testutil.go index 6d1505792e2dad..eecad00f3679a7 100644 --- a/pkg/ebpf/uprobes/testutil.go +++ b/pkg/ebpf/uprobes/testutil.go @@ -129,7 +129,7 @@ func getLibSSLPath(t *testing.T) string { require.NoError(t, err) libmmap := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") - return filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + return filepath.Join(libmmap, "libssl.so."+runtime.GOARCH) } // SetRegistry allows changing the file registry used by the attacher. 
This is useful for testing purposes, to diff --git a/pkg/ebpf/uprobes/testutil_attacher_runner.go b/pkg/ebpf/uprobes/testutil_attacher_runner.go index 79c4c7a514a9e6..1601e8643bb018 100644 --- a/pkg/ebpf/uprobes/testutil_attacher_runner.go +++ b/pkg/ebpf/uprobes/testutil_attacher_runner.go @@ -201,7 +201,7 @@ func (r *ContainerizedFmapperRunner) Run(t *testing.T, paths ...string) { mounts[filepath.Dir(path)] = filepath.Dir(path) } - r.containerName = fmt.Sprintf("fmapper-testutil-%s", utils.RandString(10)) + r.containerName = "fmapper-testutil-" + utils.RandString(10) scanner := sharedlibstestutil.BuildFmapperScanner(t) dockerConfig := dockerutils.NewRunConfig( dockerutils.NewBaseConfig( @@ -346,7 +346,7 @@ func NewContainerizedAttacherRunner() AttacherRunner { // RunAttacher starts the attacher in a container func (r *ContainerizedAttacherRunner) RunAttacher(t *testing.T, configName AttacherTestConfigName) { - r.containerName = fmt.Sprintf("uprobe-attacher-testutil-%s", utils.RandString(10)) + r.containerName = "uprobe-attacher-testutil-" + utils.RandString(10) attacherBin := testutil.BuildStandaloneAttacher(t) // Get the ebpf config to ensure we have the same paths and config diff --git a/pkg/ebpf/verifier/calculator/main.go b/pkg/ebpf/verifier/calculator/main.go index 12c21a37909def..d48c957361aaaa 100644 --- a/pkg/ebpf/verifier/calculator/main.go +++ b/pkg/ebpf/verifier/calculator/main.go @@ -210,7 +210,7 @@ func main() { // The format of progName is "objectName/programName" so we need to make the // directory structure to ensure we can save the file in the correct place. 
- destPath := filepath.Join(*complexityDataDir, fmt.Sprintf("%s.json", progName)) + destPath := filepath.Join(*complexityDataDir, progName+".json") if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { log.Fatalf("failed to create directory %s: %v", filepath.Dir(destPath), err) } diff --git a/pkg/ebpf/verifier/elf.go b/pkg/ebpf/verifier/elf.go index 95291d348a7d79..499db6135adac6 100644 --- a/pkg/ebpf/verifier/elf.go +++ b/pkg/ebpf/verifier/elf.go @@ -75,7 +75,7 @@ func getLineReader(dwarfData *dwarf.Data) (*dwarf.LineReader, error) { return lineReader, nil } } - return nil, fmt.Errorf("no line reader found in DWARF data") + return nil, errors.New("no line reader found in DWARF data") } // progStartPoint defines a possible start point for a program: section index + address @@ -90,7 +90,7 @@ func buildProgStartMap(dwarfData *dwarf.Data, symToSeq map[string]int) (map[prog progStartLines := make(map[progStartPoint]string) entryReader := dwarfData.Reader() if entryReader == nil { - return nil, fmt.Errorf("cannot get dwarf reader") + return nil, errors.New("cannot get dwarf reader") } for { diff --git a/pkg/ebpf/verifier/stats.go b/pkg/ebpf/verifier/stats.go index 54582fa0526908..3ebe7ed8c5d96a 100644 --- a/pkg/ebpf/verifier/stats.go +++ b/pkg/ebpf/verifier/stats.go @@ -191,9 +191,9 @@ func generateLoadFunction(file string, opts *StatsOptions, results *StatsResult, prog := reflect.New( reflect.StructOf([]reflect.StructField{ { - Name: fmt.Sprintf("Func_%s", progSpec.Name), + Name: "Func_" + progSpec.Name, Type: reflect.TypeOf(&ebpf.Program{}), - Tag: reflect.StructTag(fmt.Sprintf(`ebpf:"%s"`, progSpec.Name)), + Tag: reflect.StructTag(`ebpf:"` + progSpec.Name + `"`), }, }), ) diff --git a/pkg/ebpf/verifier/stats_no_linux.go b/pkg/ebpf/verifier/stats_no_linux.go index 06be7c3e0c36be..00878d96150dc2 100644 --- a/pkg/ebpf/verifier/stats_no_linux.go +++ b/pkg/ebpf/verifier/stats_no_linux.go @@ -9,12 +9,10 @@ // for any loaded eBPF program package verifier -import 
( - "fmt" -) +import "errors" // BuildVerifierStats accepts a list of eBPF object files and generates a // map of all programs and their Statistics func BuildVerifierStats(_ *StatsOptions) (*StatsResult, map[string]struct{}, error) { - return nil, nil, fmt.Errorf("not implemented") + return nil, nil, errors.New("not implemented") } diff --git a/pkg/errors/errors_test.go b/pkg/errors/errors_test.go index 568fa5b7c6840e..5088e4fde1af1f 100644 --- a/pkg/errors/errors_test.go +++ b/pkg/errors/errors_test.go @@ -21,8 +21,8 @@ func TestNotFound(t *testing.T) { // Is require.True(t, IsNotFound(err)) - require.False(t, IsNotFound(fmt.Errorf("fake"))) - require.False(t, IsNotFound(fmt.Errorf(`"foo" not found`))) + require.False(t, IsNotFound(errors.New("fake"))) + require.False(t, IsNotFound(errors.New(`"foo" not found`))) // Wrapped errWrapped := fmt.Errorf("context: %w", err) @@ -39,8 +39,8 @@ func TestRetriable(t *testing.T) { // Is var errFunc = func() error { return NewRetriable("foo", errors.New("bar")) } require.True(t, IsRetriable(errFunc())) - require.False(t, IsRetriable(fmt.Errorf("fake"))) - require.False(t, IsRetriable(fmt.Errorf(`couldn't fetch "foo": bar`))) + require.False(t, IsRetriable(errors.New("fake"))) + require.False(t, IsRetriable(errors.New(`couldn't fetch "foo": bar`))) // Wrapped errWrapped := fmt.Errorf("context: %w", err) diff --git a/pkg/flare/archive.go b/pkg/flare/archive.go index 898fe5dc6310f8..875d42f389a3c8 100644 --- a/pkg/flare/archive.go +++ b/pkg/flare/archive.go @@ -11,6 +11,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -221,7 +222,7 @@ func getSystemProbeConfig() ([]byte, error) { func (r *RemoteFlareProvider) getProcessAgentFullConfig() ([]byte, error) { addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { - return nil, fmt.Errorf("wrong configuration to connect to process-agent") + return nil, errors.New("wrong configuration to connect to 
process-agent") } procStatusURL := fmt.Sprintf("https://%s/config/all", addressPort) @@ -242,7 +243,7 @@ func (r *RemoteFlareProvider) getChecksFromProcessAgent(fb flaretypes.FlareBuild checkURL := fmt.Sprintf("https://%s/check/", addressPort) getCheck := func(checkName, setting string) { - filename := fmt.Sprintf("%s_check_output.json", checkName) + filename := checkName + "_check_output.json" if !pkgconfigsetup.Datadog().GetBool(setting) { fb.AddFile(filename, []byte(fmt.Sprintf("'%s' is disabled", setting))) //nolint:errcheck @@ -279,7 +280,7 @@ func (r *RemoteFlareProvider) getAgentTaggerList() ([]byte, error) { func (r *RemoteFlareProvider) getProcessAgentTaggerList() ([]byte, error) { addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { - return nil, fmt.Errorf("wrong configuration to connect to process-agent") + return nil, errors.New("wrong configuration to connect to process-agent") } taggerListURL := fmt.Sprintf("https://%s/agent/tagger-list", addressPort) diff --git a/pkg/flare/archive_docker.go b/pkg/flare/archive_docker.go index f93927da89513d..34970ce06ca8c2 100644 --- a/pkg/flare/archive_docker.go +++ b/pkg/flare/archive_docker.go @@ -11,6 +11,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "regexp" "strings" @@ -29,7 +30,7 @@ const dockerCommandMaxLength = 29 func getDockerSelfInspect(wmeta option.Option[workloadmeta.Component]) ([]byte, error) { if !env.IsContainerized() { - return nil, fmt.Errorf("The Agent is not containerized") + return nil, errors.New("The Agent is not containerized") } du, err := docker.GetDockerUtil() diff --git a/pkg/flare/clusteragent/archive_dca.go b/pkg/flare/clusteragent/archive_dca.go index d15dcb2ffc46b9..3699df81139035 100644 --- a/pkg/flare/clusteragent/archive_dca.go +++ b/pkg/flare/clusteragent/archive_dca.go @@ -121,7 +121,7 @@ func getMetadataMap(fb flaretypes.FlareBuilder) error { metaList := apiv1.NewMetadataResponse() cl, err := 
apiserver.GetAPIClient() if err != nil { - metaList.Errors = fmt.Sprintf("Can't create client to query the API Server: %s", err.Error()) + metaList.Errors = "Can't create client to query the API Server: " + err.Error() } else { // Grab the metadata map for all nodes. metaList, err = apiserver.GetMetadataMapBundleOnAllNodes(cl) diff --git a/pkg/flare/clusteragent/manifests.go b/pkg/flare/clusteragent/manifests.go index 461eeb38c34e04..87ab487c016f9f 100644 --- a/pkg/flare/clusteragent/manifests.go +++ b/pkg/flare/clusteragent/manifests.go @@ -13,9 +13,11 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "os" + "strconv" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -140,7 +142,7 @@ func getDeployedHelmConfigmap(cl *apiserver.APIClient, name string, namespace st return nil, err } if len(configmapList.Items) != 1 { - return nil, log.Errorf("%s configmaps found, but expected 1", fmt.Sprint(len(configmapList.Items))) + return nil, log.Errorf("%s configmaps found, but expected 1", strconv.Itoa(len(configmapList.Items))) } return &configmapList.Items[0], nil } @@ -158,7 +160,7 @@ func getDeployedHelmSecret(cl *apiserver.APIClient, name string, namespace strin return nil, err } if len(secretList.Items) != 1 { - return nil, log.Errorf("%s secrets found, but expected 1", fmt.Sprint(len(secretList.Items))) + return nil, log.Errorf("%s secrets found, but expected 1", strconv.Itoa(len(secretList.Items))) } return &secretList.Items[0], nil } @@ -177,7 +179,7 @@ func decodeRelease(data string) ([]byte, error) { // gzip magic header is not found if len(b) < 4 { // Avoid panic if b[0:3] cannot be accessed - return nil, log.Errorf("The byte array is too short (expected at least 4 characters, got %s instead): it cannot contain a Helm release", fmt.Sprint(len(b))) + return nil, log.Errorf("The byte array is too short (expected at least 4 characters, got %s instead): it cannot contain a Helm release", strconv.Itoa(len(b))) } if 
bytes.Equal(b[0:3], magicGzip) { r, err := gzip.NewReader(bytes.NewReader(b)) @@ -258,7 +260,7 @@ func getHelmValues() ([]byte, error) { return helmUserValues, nil } } - return nil, fmt.Errorf("Unable to collect Helm values from secrets/configmaps") + return nil, errors.New("Unable to collect Helm values from secrets/configmaps") } // getDatadogAgentManifest retrieves the user-defined manifest for the Datadog Agent resource (managed by the Operator) diff --git a/pkg/flare/config_check.go b/pkg/flare/config_check.go index e871549713f93b..f45132925f3172 100644 --- a/pkg/flare/config_check.go +++ b/pkg/flare/config_check.go @@ -108,7 +108,7 @@ func PrintConfigWithInstanceIDs(w io.Writer, c integration.Config, instanceIDs [ printContainerExclusionRulesInfo(w, &c) } if c.NodeName != "" { - state := fmt.Sprintf("dispatched to %s", c.NodeName) + state := "dispatched to " + c.NodeName fmt.Fprintf(w, "%s: %s\n", color.BlueString("State"), color.CyanString(state)) } fmt.Fprintln(w, "===") @@ -162,7 +162,7 @@ func PrintClusterCheckConfig(w io.Writer, c integration.Config, checkName string printContainerExclusionRulesInfo(w, &c) } if c.NodeName != "" { - state := fmt.Sprintf("dispatched to %s", c.NodeName) + state := "dispatched to " + c.NodeName fmt.Fprintf(w, "%s: %s\n", color.BlueString("State"), color.CyanString(state)) } fmt.Fprintln(w, "===") diff --git a/pkg/flare/remote_config.go b/pkg/flare/remote_config.go index 0bec0bc03c6eb1..53a7de8548f3db 100644 --- a/pkg/flare/remote_config.go +++ b/pkg/flare/remote_config.go @@ -38,7 +38,7 @@ func (r *RemoteFlareProvider) exportRemoteConfig(fb flaretypes.FlareBuilder) err ctx, cancel := context.WithCancel(context.Background()) defer cancel() md := metadata.MD{ - "authorization": []string{fmt.Sprintf("Bearer %s", r.IPC.GetAuthToken())}, // TODO IPC: Implement a GRPC secure client + "authorization": []string{"Bearer " + r.IPC.GetAuthToken()}, // TODO IPC: Implement a GRPC secure client } ctx = metadata.NewOutgoingContext(ctx, md) 
diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go index 461c7819938932..a91fa9d3ec315a 100644 --- a/pkg/fleet/daemon/remote_config.go +++ b/pkg/fleet/daemon/remote_config.go @@ -7,6 +7,7 @@ package daemon import ( "encoding/json" + "errors" "fmt" "net/url" "strings" @@ -225,13 +226,13 @@ func handleUpdaterCatalogDDUpdate(h handleCatalogUpdate, firstCatalogApplied fun func validatePackage(pkg Package) error { if pkg.Name == "" { - return fmt.Errorf("package name is empty") + return errors.New("package name is empty") } if pkg.Version == "" { - return fmt.Errorf("package version is empty") + return errors.New("package version is empty") } if pkg.URL == "" { - return fmt.Errorf("package URL is empty") + return errors.New("package URL is empty") } url, err := url.Parse(pkg.URL) if err != nil { diff --git a/pkg/fleet/daemon/task_db.go b/pkg/fleet/daemon/task_db.go index f7e3d6eb23e0a9..24082f7c57a947 100644 --- a/pkg/fleet/daemon/task_db.go +++ b/pkg/fleet/daemon/task_db.go @@ -7,6 +7,7 @@ package daemon import ( "encoding/json" + "errors" "fmt" "go.etcd.io/bbolt" @@ -52,7 +53,7 @@ func (p *taskDB) SetTaskState(task requestState) error { err := p.db.Update(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketTasks) if b == nil { - return fmt.Errorf("bucket not found") + return errors.New("bucket not found") } rawTask, err := json.Marshal(&task) if err != nil { @@ -72,7 +73,7 @@ func (p *taskDB) GetTasksState() (map[string]requestState, error) { err := p.db.View(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketTasks) if b == nil { - return fmt.Errorf("bucket not found") + return errors.New("bucket not found") } err := b.ForEach(func(k, v []byte) error { var task requestState diff --git a/pkg/fleet/installer/config/config_windows.go b/pkg/fleet/installer/config/config_windows.go index 8d728f2c8d0f0d..1bcf6f1f54971d 100644 --- a/pkg/fleet/installer/config/config_windows.go +++ b/pkg/fleet/installer/config/config_windows.go @@ -51,7 +51,7 @@ func 
(d *Directories) WriteExperiment(ctx context.Context, operations Operations return fmt.Errorf("error getting state: %w", err) } if state.ExperimentDeploymentID != "" { - return fmt.Errorf("there is already an experiment in progress") + return errors.New("there is already an experiment in progress") } // Clear and recreate the experiment/backup directory err = os.RemoveAll(d.ExperimentPath) diff --git a/pkg/fleet/installer/db/db.go b/pkg/fleet/installer/db/db.go index 2a8e0e4d2b3495..bc1086d184f946 100644 --- a/pkg/fleet/installer/db/db.go +++ b/pkg/fleet/installer/db/db.go @@ -8,6 +8,7 @@ package db import ( "encoding/json" + "errors" "fmt" "time" @@ -20,7 +21,7 @@ var ( var ( // ErrPackageNotFound is returned when a package is not found - ErrPackageNotFound = fmt.Errorf("package not found") + ErrPackageNotFound = errors.New("package not found") ) // Package represents a package @@ -85,7 +86,7 @@ func (p *PackagesDB) SetPackage(pkg Package) error { err := p.db.Update(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketPackages) if b == nil { - return fmt.Errorf("bucket not found") + return errors.New("bucket not found") } rawPkg, err := json.Marshal(&pkg) if err != nil { @@ -104,7 +105,7 @@ func (p *PackagesDB) DeletePackage(name string) error { err := p.db.Update(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketPackages) if b == nil { - return fmt.Errorf("bucket not found") + return errors.New("bucket not found") } return b.Delete([]byte(name)) }) @@ -120,7 +121,7 @@ func (p *PackagesDB) HasPackage(name string) (bool, error) { err := p.db.View(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketPackages) if b == nil { - return fmt.Errorf("bucket not found") + return errors.New("bucket not found") } v := b.Get([]byte(name)) hasPackage = len(v) > 0 @@ -138,7 +139,7 @@ func (p *PackagesDB) GetPackage(name string) (Package, error) { err := p.db.View(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketPackages) if b == nil { - return fmt.Errorf("bucket not found") + return 
errors.New("bucket not found") } v := b.Get([]byte(name)) if len(v) == 0 { @@ -162,7 +163,7 @@ func (p *PackagesDB) ListPackages() ([]Package, error) { err := p.db.View(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketPackages) if b == nil { - return fmt.Errorf("bucket not found") + return errors.New("bucket not found") } return b.ForEach(func(k, v []byte) error { // support v0.0.7 diff --git a/pkg/fleet/installer/errors/errors_test.go b/pkg/fleet/installer/errors/errors_test.go index 4cb8be7eb4a0ee..00cb002449913e 100644 --- a/pkg/fleet/installer/errors/errors_test.go +++ b/pkg/fleet/installer/errors/errors_test.go @@ -19,14 +19,14 @@ func TestGetCode(t *testing.T) { // Simple case var err error = &InstallerError{ - err: fmt.Errorf("test: test"), + err: errors.New("test: test"), code: ErrDownloadFailed, } assert.Equal(t, GetCode(err), ErrDownloadFailed) // Wrap err = fmt.Errorf("test1: %w", &InstallerError{ - err: fmt.Errorf("test2: test3"), + err: errors.New("test2: test3"), code: ErrDownloadFailed, }) assert.Equal(t, GetCode(err), ErrDownloadFailed) @@ -37,7 +37,7 @@ func TestGetCode(t *testing.T) { } func TestWrap(t *testing.T) { - err := fmt.Errorf("test: test") + err := errors.New("test: test") taskErr := Wrap(ErrDownloadFailed, err) assert.Equal(t, taskErr, &InstallerError{ err: err, @@ -60,7 +60,7 @@ func TestWrap(t *testing.T) { func TestToJSON(t *testing.T) { err := fmt.Errorf("test: %w", &InstallerError{ - err: fmt.Errorf("test2: test3"), + err: errors.New("test2: test3"), code: ErrDownloadFailed, }) assert.Equal(t, ToJSON(err), `{"error":"test: test2: test3","code":1}`) diff --git a/pkg/fleet/installer/exec/installer_exec.go b/pkg/fleet/installer/exec/installer_exec.go index 70e91c273c1757..bee06ff4ffc420 100644 --- a/pkg/fleet/installer/exec/installer_exec.go +++ b/pkg/fleet/installer/exec/installer_exec.go @@ -47,7 +47,7 @@ type installerCmd struct { } func (i *InstallerExec) newInstallerCmdCustomPathDetached(ctx context.Context, command string, path 
string, args ...string) *installerCmd { - span, ctx := telemetry.StartSpanFromContext(ctx, fmt.Sprintf("installer.%s", command)) + span, ctx := telemetry.StartSpanFromContext(ctx, "installer."+command) span.SetTag("args", strings.Join(args, " ")) // NOTE: We very intentionally don't provide ctx to exec.Command. // exec.Command will kill the process if the context is cancelled. We don't want that here since @@ -65,7 +65,7 @@ func (i *InstallerExec) newInstallerCmdCustomPathDetached(ctx context.Context, c } func (i *InstallerExec) newInstallerCmdCustomPath(ctx context.Context, command string, path string, args ...string) *installerCmd { - span, ctx := telemetry.StartSpanFromContext(ctx, fmt.Sprintf("installer.%s", command)) + span, ctx := telemetry.StartSpanFromContext(ctx, "installer."+command) span.SetTag("args", strings.Join(args, " ")) cmd := exec.CommandContext(ctx, path, append([]string{command}, args...)...) cmd.Stdout = os.Stdout diff --git a/pkg/fleet/installer/installer.go b/pkg/fleet/installer/installer.go index 072f15649cb8b8..5113ddb31f0848 100644 --- a/pkg/fleet/installer/installer.go +++ b/pkg/fleet/installer/installer.go @@ -482,7 +482,7 @@ func (i *installerImpl) PromoteExperiment(ctx context.Context, pkg string) error } if !state.HasExperiment() { // Fail early - return fmt.Errorf("no experiment to promote") + return errors.New("no experiment to promote") } err = i.hooks.PrePromoteExperiment(ctx, pkg) @@ -694,7 +694,7 @@ func (i *installerImpl) InstrumentAPMInjector(ctx context.Context, method string return fmt.Errorf("could not check if APM dotnet library is installed: %w", err) } if !isDotnetInstalled { - return fmt.Errorf("APM dotnet library is not installed") + return errors.New("APM dotnet library is not installed") } } else { var isInjectorInstalled bool @@ -703,7 +703,7 @@ func (i *installerImpl) InstrumentAPMInjector(ctx context.Context, method string return fmt.Errorf("could not check if APM injector is installed: %w", err) } if 
!isInjectorInstalled { - return fmt.Errorf("APM injector is not installed") + return errors.New("APM injector is not installed") } } @@ -727,7 +727,7 @@ func (i *installerImpl) UninstrumentAPMInjector(ctx context.Context, method stri return fmt.Errorf("could not check if APM dotnet library is installed: %w", err) } if !isDotnetInstalled { - return fmt.Errorf("APM dotnet library is not installed") + return errors.New("APM dotnet library is not installed") } } else { var isInjectorInstalled bool @@ -736,7 +736,7 @@ func (i *installerImpl) UninstrumentAPMInjector(ctx context.Context, method stri return fmt.Errorf("could not check if APM injector is installed: %w", err) } if !isInjectorInstalled { - return fmt.Errorf("APM injector is not installed") + return errors.New("APM injector is not installed") } } diff --git a/pkg/fleet/installer/installinfo/installinfo.go b/pkg/fleet/installer/installinfo/installinfo.go index d9329addc7bd8f..07a193c315882f 100644 --- a/pkg/fleet/installer/installinfo/installinfo.go +++ b/pkg/fleet/installer/installinfo/installinfo.go @@ -110,14 +110,14 @@ func RemoveInstallInfo() { func getToolVersion(ctx context.Context, installType string) (tool string, toolVersion string, installerVersion string) { tool = toolInstaller toolVersion = version.AgentVersion - installerVersion = fmt.Sprintf("%s_package", installType) + installerVersion = installType + "_package" if _, err := exec.LookPath("dpkg-query"); err == nil { tool = "dpkg" toolVersion, err = getDpkgVersion(ctx) if err != nil { toolVersion = "unknown" } - toolVersion = fmt.Sprintf("dpkg-%s", toolVersion) + toolVersion = "dpkg-" + toolVersion } if _, err := exec.LookPath("rpm"); err == nil { tool = "rpm" @@ -125,7 +125,7 @@ func getToolVersion(ctx context.Context, installType string) (tool string, toolV if err != nil { toolVersion = "unknown" } - toolVersion = fmt.Sprintf("rpm-%s", toolVersion) + toolVersion = "rpm-" + toolVersion } return } diff --git a/pkg/fleet/installer/msi/msiexec.go 
b/pkg/fleet/installer/msi/msiexec.go index cef95c6ffe6140..6fec90d020dca0 100644 --- a/pkg/fleet/installer/msi/msiexec.go +++ b/pkg/fleet/installer/msi/msiexec.go @@ -131,14 +131,14 @@ func WithMsi(target string) MsiexecOption { func WithMsiFromPackagePath(target, product string) MsiexecOption { return func(a *msiexecArgs) error { updaterPath := filepath.Join(paths.PackagesPath, product, target) - msis, err := filepath.Glob(filepath.Join(updaterPath, fmt.Sprintf("%s-*-1-x86_64.msi", product))) + msis, err := filepath.Glob(filepath.Join(updaterPath, product+"-*-1-x86_64.msi")) if err != nil { return err } if len(msis) > 1 { - return fmt.Errorf("too many MSIs in package") + return errors.New("too many MSIs in package") } else if len(msis) == 0 { - return fmt.Errorf("no MSIs in package") + return errors.New("no MSIs in package") } a.target = msis[0] return nil @@ -509,7 +509,7 @@ func Cmd(options ...MsiexecOption) (*Msiexec, error) { } } if a.msiAction == "" || a.target == "" { - return nil, fmt.Errorf("argument error") + return nil, errors.New("argument error") } cmd := &Msiexec{ args: a, diff --git a/pkg/fleet/installer/msi/product.go b/pkg/fleet/installer/msi/product.go index 78fb0046d3b849..a12d12779fafdd 100644 --- a/pkg/fleet/installer/msi/product.go +++ b/pkg/fleet/installer/msi/product.go @@ -146,7 +146,7 @@ func FindAllProductCodes(productName string) ([]Product, error) { } if len(products) == 0 { - return nil, fmt.Errorf("no products found") + return nil, errors.New("no products found") } return products, nil diff --git a/pkg/fleet/installer/oci/download.go b/pkg/fleet/installer/oci/download.go index 67c044799407e9..7b2755408ce8c0 100644 --- a/pkg/fleet/installer/oci/download.go +++ b/pkg/fleet/installer/oci/download.go @@ -126,11 +126,11 @@ func (d *Downloader) Download(ctx context.Context, packageURL string) (*Download } name, ok := manifest.Annotations[AnnotationPackage] if !ok { - return nil, fmt.Errorf("package manifest is missing package annotation") + 
return nil, errors.New("package manifest is missing package annotation") } version, ok := manifest.Annotations[AnnotationVersion] if !ok { - return nil, fmt.Errorf("package manifest is missing version annotation") + return nil, errors.New("package manifest is missing version annotation") } size := uint64(0) rawSize, ok := manifest.Annotations[AnnotationSize] @@ -310,7 +310,7 @@ func (d *Downloader) downloadIndex(index oci.ImageIndex) (oci.Image, error) { } return nil, installerErrors.Wrap( installerErrors.ErrPackageNotFound, - fmt.Errorf("no matching image found in the index"), + errors.New("no matching image found in the index"), ) } diff --git a/pkg/fleet/installer/packages/apminject/apm_inject.go b/pkg/fleet/installer/packages/apminject/apm_inject.go index b4497042242e58..a7354ea557b99a 100644 --- a/pkg/fleet/installer/packages/apminject/apm_inject.go +++ b/pkg/fleet/installer/packages/apminject/apm_inject.go @@ -11,6 +11,7 @@ package apminject import ( "bytes" "context" + "errors" "fmt" "os" "os/exec" @@ -158,7 +159,7 @@ func (a *InjectorInstaller) Instrument(ctx context.Context) (retErr error) { dockerIsInstalled := isDockerInstalled(ctx) if mustInstrumentDocker(a.Env) && !dockerIsInstalled { - return fmt.Errorf("DD_APM_INSTRUMENTATION_ENABLED is set to docker but docker is not installed") + return errors.New("DD_APM_INSTRUMENTATION_ENABLED is set to docker but docker is not installed") } if shouldInstrumentDocker(a.Env) && dockerIsInstalled { // Set up defaults for agent sockets -- requires an agent restart diff --git a/pkg/fleet/installer/packages/apminject/file.go b/pkg/fleet/installer/packages/apminject/file.go index a53619b6848824..5af517e5d20e85 100644 --- a/pkg/fleet/installer/packages/apminject/file.go +++ b/pkg/fleet/installer/packages/apminject/file.go @@ -10,6 +10,7 @@ package apminject import ( "bytes" "context" + "errors" "fmt" "io" "os" @@ -161,7 +162,7 @@ func copyFile(src, dst string) (err error) { var ok bool stat, ok = 
srcInfo.Sys().(*syscall.Stat_t) if !ok || stat == nil { - return fmt.Errorf("could not get file stat") + return errors.New("could not get file stat") } // create dst file with same permissions diff --git a/pkg/fleet/installer/packages/datadog_agent_linux.go b/pkg/fleet/installer/packages/datadog_agent_linux.go index 04f939839b2fe7..c411baf47d2110 100644 --- a/pkg/fleet/installer/packages/datadog_agent_linux.go +++ b/pkg/fleet/installer/packages/datadog_agent_linux.go @@ -7,6 +7,7 @@ package packages import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -428,14 +429,14 @@ func (s *datadogAgentService) checkPlatformSupport(ctx HookContext) error { return nil case service.UpstartType: if ctx.PackageType != PackageTypeDEB && ctx.PackageType != PackageTypeRPM { - return fmt.Errorf("upstart is only supported in DEB and RPM packages") + return errors.New("upstart is only supported in DEB and RPM packages") } case service.SysvinitType: if ctx.PackageType != PackageTypeDEB { - return fmt.Errorf("sysvinit is only supported in DEB packages") + return errors.New("sysvinit is only supported in DEB packages") } default: - return fmt.Errorf("could not determine service manager type, platform is not supported") + return errors.New("could not determine service manager type, platform is not supported") } return nil } @@ -453,7 +454,7 @@ func (s *datadogAgentService) EnableStable(ctx HookContext) error { case service.SysvinitType: return sysvinit.InstallAll(ctx, s.SysvinitServices...) default: - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } } @@ -470,7 +471,7 @@ func (s *datadogAgentService) DisableStable(ctx HookContext) error { case service.SysvinitType: return sysvinit.RemoveAll(ctx, s.SysvinitServices...) 
default: - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } } @@ -494,7 +495,7 @@ func (s *datadogAgentService) RestartStable(ctx HookContext) error { case service.SysvinitType: return sysvinit.Restart(ctx, s.SysvinitMainService) default: - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } } @@ -511,7 +512,7 @@ func (s *datadogAgentService) StopStable(ctx HookContext) error { case service.SysvinitType: return sysvinit.StopAll(ctx, reverseStringSlice(s.SysvinitServices)...) default: - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } } @@ -528,7 +529,7 @@ func (s *datadogAgentService) WriteStable(ctx HookContext) error { case service.SysvinitType: return nil // Nothing to do, files are embedded in the package } - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } // RemoveStable removes the stable units @@ -544,7 +545,7 @@ func (s *datadogAgentService) RemoveStable(ctx HookContext) error { case service.SysvinitType: return nil // Nothing to do, files are embedded in the package } - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } // StartExperiment starts the experiment unit @@ -556,11 +557,11 @@ func (s *datadogAgentService) StartExperiment(ctx HookContext) error { case service.SystemdType: return systemd.StartUnit(ctx, s.SystemdMainUnitExp) case service.UpstartType: - return fmt.Errorf("experiments are not supported on upstart") + return errors.New("experiments are not supported on upstart") case service.SysvinitType: - return fmt.Errorf("experiments are not supported on sysvinit") + return errors.New("experiments are not supported on sysvinit") } - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } // StopExperiment stops the experiment units @@ -576,7 
+577,7 @@ func (s *datadogAgentService) StopExperiment(ctx HookContext) error { case service.SysvinitType: return nil // Experiments are not supported on sysvinit } - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } // WriteExperiment writes the experiment units to the system and reloads the systemd daemon @@ -588,11 +589,11 @@ func (s *datadogAgentService) WriteExperiment(ctx HookContext) error { case service.SystemdType: return writeEmbeddedUnitsAndReload(ctx, s.SystemdUnitsExp...) case service.UpstartType: - return fmt.Errorf("experiments are not supported on upstart") + return errors.New("experiments are not supported on upstart") case service.SysvinitType: - return fmt.Errorf("experiments are not supported on sysvinit") + return errors.New("experiments are not supported on sysvinit") } - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } // RemoveExperiment removes the experiment units from the disk @@ -608,7 +609,7 @@ func (s *datadogAgentService) RemoveExperiment(ctx HookContext) error { case service.SysvinitType: return nil // Experiments are not supported on sysvinit } - return fmt.Errorf("unsupported service manager") + return errors.New("unsupported service manager") } // isAgentConfigFilePresent checks if the agent config file exists diff --git a/pkg/fleet/installer/packages/datadog_agent_windows.go b/pkg/fleet/installer/packages/datadog_agent_windows.go index 029e87e7f90e18..c72ac5b63f7f3e 100644 --- a/pkg/fleet/installer/packages/datadog_agent_windows.go +++ b/pkg/fleet/installer/packages/datadog_agent_windows.go @@ -307,7 +307,7 @@ func startWatchdog(_ context.Context, timeout time.Time) error { // the service has died // we need to restore the stable Agent // return an error to signal the caller to restore the stable Agent - return fmt.Errorf("Datadog Installer is not running") + return errors.New("Datadog Installer is not running") } // check the 
Agent service @@ -319,7 +319,7 @@ func startWatchdog(_ context.Context, timeout time.Time) error { // the service has died // we need to restore the stable Agent // return an error to signal the caller to restore the stable Agent - return fmt.Errorf("Datadog Agent is not running") + return errors.New("Datadog Agent is not running") } // wait for the events to be signaled with a timeout @@ -336,7 +336,7 @@ func startWatchdog(_ context.Context, timeout time.Time) error { } - return fmt.Errorf("watchdog timeout") + return errors.New("watchdog timeout") } diff --git a/pkg/fleet/installer/packages/exec/apm_inject_exec_windows.go b/pkg/fleet/installer/packages/exec/apm_inject_exec_windows.go index d92ad30dbc1373..c64c4f30515f39 100644 --- a/pkg/fleet/installer/packages/exec/apm_inject_exec_windows.go +++ b/pkg/fleet/installer/packages/exec/apm_inject_exec_windows.go @@ -74,7 +74,7 @@ type apmInjectExecCmd struct { } func (a *APMInjectExec) newAPMInjectExecCmd(ctx context.Context, command string, args ...string) *apmInjectExecCmd { - span, ctx := telemetry.StartSpanFromContext(ctx, fmt.Sprintf("apmInjectExec.%s", command)) + span, ctx := telemetry.StartSpanFromContext(ctx, "apmInjectExec."+command) span.SetTag("args", args) // Build the command arguments diff --git a/pkg/fleet/installer/packages/exec/dotnet_library_exec.go b/pkg/fleet/installer/packages/exec/dotnet_library_exec.go index 1ef595a4969f47..069858b4c1f299 100644 --- a/pkg/fleet/installer/packages/exec/dotnet_library_exec.go +++ b/pkg/fleet/installer/packages/exec/dotnet_library_exec.go @@ -40,7 +40,7 @@ type dotnetLibraryExecCmd struct { } func (d *DotnetLibraryExec) newDotnetLibraryExecCmd(ctx context.Context, command string, args ...string) *dotnetLibraryExecCmd { - span, ctx := telemetry.StartSpanFromContext(ctx, fmt.Sprintf("dotnetLibraryExec.%s", command)) + span, ctx := telemetry.StartSpanFromContext(ctx, "dotnetLibraryExec."+command) span.SetTag("args", args) cmd := exec.CommandContext(ctx, 
d.execBinPath, append([]string{command}, args...)...) cmd.Stdout = os.Stdout diff --git a/pkg/fleet/installer/packages/packages.go b/pkg/fleet/installer/packages/packages.go index d59a2cd22e3719..cb56b87872085b 100644 --- a/pkg/fleet/installer/packages/packages.go +++ b/pkg/fleet/installer/packages/packages.go @@ -279,7 +279,7 @@ type PackageCommandHandler func(ctx context.Context, command string) error // RunPackageCommand runs a package-specific command func RunPackageCommand(ctx context.Context, packageName string, command string) (err error) { - span, ctx := telemetry.StartSpanFromContext(ctx, fmt.Sprintf("package.%s", packageName)) + span, ctx := telemetry.StartSpanFromContext(ctx, "package."+packageName) span.SetTag("command", command) defer func() { span.Finish(err) }() diff --git a/pkg/fleet/installer/packages/service/systemd/systemd.go b/pkg/fleet/installer/packages/service/systemd/systemd.go index f5d05a2d2c5e3b..721a3a59ff6d29 100644 --- a/pkg/fleet/installer/packages/service/systemd/systemd.go +++ b/pkg/fleet/installer/packages/service/systemd/systemd.go @@ -127,7 +127,7 @@ func WriteUnitOverride(ctx context.Context, unit string, name string, content st if err != nil { return fmt.Errorf("error creating systemd directory: %w", err) } - overridePath := filepath.Join(userUnitsPath, unit+".d", fmt.Sprintf("%s.conf", name)) + overridePath := filepath.Join(userUnitsPath, unit+".d", name+".conf") return os.WriteFile(overridePath, []byte(content), 0644) } diff --git a/pkg/fleet/installer/packages/service/windows/impl.go b/pkg/fleet/installer/packages/service/windows/impl.go index 847b70277c151f..7a8b8898c45779 100644 --- a/pkg/fleet/installer/packages/service/windows/impl.go +++ b/pkg/fleet/installer/packages/service/windows/impl.go @@ -11,6 +11,7 @@ import ( "context" "errors" "fmt" + "strconv" "time" "golang.org/x/sys/windows" @@ -93,7 +94,7 @@ func (w *WinServiceManager) terminateServiceProcess(ctx context.Context, service return nil // Service is not 
running } - span.SetTag("pid", fmt.Sprintf("%d", processID)) + span.SetTag("pid", strconv.FormatUint(uint64(processID), 10)) // Open the process with termination rights handle, err := w.api.OpenProcess(windows.SYNCHRONIZE|windows.PROCESS_TERMINATE|windows.PROCESS_QUERY_LIMITED_INFORMATION, false, processID) diff --git a/pkg/fleet/installer/packages/user/windows/user.go b/pkg/fleet/installer/packages/user/windows/user.go index d04c433b1d4031..abb84eda57cabe 100644 --- a/pkg/fleet/installer/packages/user/windows/user.go +++ b/pkg/fleet/installer/packages/user/windows/user.go @@ -111,7 +111,7 @@ func ValidateAgentUserRemoteUpdatePrerequisites(userName string) error { // Remote updates fully uninstall the previous version, so we need the password. return installerErrors.Wrap( installerErrors.ErrPasswordNotProvided, - fmt.Errorf("the Agent user password is not available. The password is required for domain accounts. Please reinstall the Agent with the password provided"), + errors.New("the Agent user password is not available. The password is required for domain accounts. 
Please reinstall the Agent with the password provided"), ) } @@ -385,7 +385,7 @@ var validateProcessContext = func() error { } if !user.User.Sid.IsWellKnown(windows.WinLocalSystemSid) { - return fmt.Errorf("process is not running as LocalSystem") + return errors.New("process is not running as LocalSystem") } return nil diff --git a/pkg/fleet/installer/paths/installer_paths_windows.go b/pkg/fleet/installer/paths/installer_paths_windows.go index 5f6455f3262613..9eeb3efd949e64 100644 --- a/pkg/fleet/installer/paths/installer_paths_windows.go +++ b/pkg/fleet/installer/paths/installer_paths_windows.go @@ -286,7 +286,7 @@ func IsDirSecure(targetDir string) error { return fmt.Errorf("failed to get owner: %w", err) } if owner == nil { - return fmt.Errorf("owner is nil") + return errors.New("owner is nil") } var allowedSids []*windows.SID for _, id := range allowedWellKnownSids { diff --git a/pkg/fleet/installer/repository/repository.go b/pkg/fleet/installer/repository/repository.go index 23cb545e323373..a421ea84a63a65 100644 --- a/pkg/fleet/installer/repository/repository.go +++ b/pkg/fleet/installer/repository/repository.go @@ -205,7 +205,7 @@ func (r *Repository) Delete(ctx context.Context) error { } if len(files) > 0 { - return fmt.Errorf("could not delete root directory, not empty after cleanup") + return errors.New("could not delete root directory, not empty after cleanup") } // Delete the repository directory @@ -231,20 +231,20 @@ func (r *Repository) SetExperiment(ctx context.Context, name string, sourcePath return fmt.Errorf("could not cleanup repository: %w", err) } if !repository.stable.Exists() { - return fmt.Errorf("stable link does not exist, invalid state") + return errors.New("stable link does not exist, invalid state") } if !repository.experiment.Exists() { - return fmt.Errorf("experiment link does not exist, invalid state") + return errors.New("experiment link does not exist, invalid state") } // Because we repair directories on windows, 
repository.setExperiment will // not fail if called for a version that is already set to experiment or // stable while it does on unix. These check ensure that we have the same // behavior on both platforms. if filepath.Base(*repository.experiment.packagePath) == name { - return fmt.Errorf("cannot set new experiment to the same version as the current experiment") + return errors.New("cannot set new experiment to the same version as the current experiment") } if filepath.Base(*repository.stable.packagePath) == name { - return fmt.Errorf("cannot set new experiment to the same version as stable") + return errors.New("cannot set new experiment to the same version as stable") } err = repository.setExperiment(name, sourcePath) if err != nil { @@ -268,13 +268,13 @@ func (r *Repository) PromoteExperiment(ctx context.Context) error { return fmt.Errorf("could not cleanup repository: %w", err) } if !repository.stable.Exists() { - return fmt.Errorf("stable link does not exist, invalid state") + return errors.New("stable link does not exist, invalid state") } if !repository.experiment.Exists() { - return fmt.Errorf("experiment link does not exist, invalid state") + return errors.New("experiment link does not exist, invalid state") } if repository.experiment.Target() == "" || repository.stable.Target() == repository.experiment.Target() { - return fmt.Errorf("no experiment to promote") + return errors.New("no experiment to promote") } err = repository.stable.Set(*repository.experiment.packagePath) if err != nil { @@ -302,10 +302,10 @@ func (r *Repository) DeleteExperiment(ctx context.Context) error { return fmt.Errorf("could not cleanup repository: %w", err) } if !repository.stable.Exists() { - return fmt.Errorf("stable link does not exist, invalid state") + return errors.New("stable link does not exist, invalid state") } if !repository.experiment.Exists() { - return fmt.Errorf("experiment link does not exist, invalid state") + return errors.New("experiment link does not exist, 
invalid state") } err = repository.setExperimentToStable() if err != nil { @@ -378,7 +378,7 @@ func (r *repositoryFiles) setStable(name string, sourcePath string) error { func movePackageFromSource(packageName string, rootPath string, sourcePath string) (string, error) { if packageName == "" || packageName == stableVersionLink || packageName == experimentVersionLink { - return "", fmt.Errorf("invalid package name") + return "", errors.New("invalid package name") } targetPath := filepath.Join(rootPath, packageName) _, err := os.Stat(targetPath) @@ -395,7 +395,7 @@ func movePackageFromSource(packageName string, rootPath string, sourcePath strin } return targetPath, nil } - return "", fmt.Errorf("target package already exists") + return "", errors.New("target package already exists") } if !errors.Is(err, os.ErrNotExist) { return "", fmt.Errorf("could not stat target package: %w", err) diff --git a/pkg/fleet/installer/setup/common/setup.go b/pkg/fleet/installer/setup/common/setup.go index 0b55afeb5b3e01..731f30bfe64809 100644 --- a/pkg/fleet/installer/setup/common/setup.go +++ b/pkg/fleet/installer/setup/common/setup.go @@ -67,7 +67,7 @@ Running the %s installation script (https://github.com/DataDog/datadog-agent/tre return r == ',' || r == ' ' }) // comma and space-separated list, consistent with viper and documentation } - span, ctx := telemetry.StartSpanFromContext(ctx, fmt.Sprintf("setup.%s", flavor)) + span, ctx := telemetry.StartSpanFromContext(ctx, "setup."+flavor) s := &Setup{ configDir: paths.DatadogDataDir, installer: installer, @@ -129,7 +129,7 @@ func (s *Setup) Run() (err error) { return fmt.Errorf("failed to write configuration: %w", err) } } - err = installinfo.WriteInstallInfo(ctx, fmt.Sprintf("install-script-%s", s.flavor)) + err = installinfo.WriteInstallInfo(ctx, "install-script-"+s.flavor) if err != nil { return fmt.Errorf("failed to write install info: %w", err) } diff --git a/pkg/fleet/installer/setup/config/write.go 
b/pkg/fleet/installer/setup/config/write.go index e0a49cbadd171d..4d50b141ff8c25 100644 --- a/pkg/fleet/installer/setup/config/write.go +++ b/pkg/fleet/installer/setup/config/write.go @@ -7,6 +7,7 @@ package config import ( "bytes" + "errors" "fmt" "os" "path/filepath" @@ -235,7 +236,7 @@ func ensureUTF8(input []byte) ([]byte, error) { // Ensure already UTF-8 if !utf8.Valid(input) { - return nil, fmt.Errorf("contains bytes that are not valid UTF-8") + return nil, errors.New("contains bytes that are not valid UTF-8") } return input, nil diff --git a/pkg/fleet/installer/setup/defaultscript/default_script.go b/pkg/fleet/installer/setup/defaultscript/default_script.go index 69a007b216e219..7680c72960102d 100644 --- a/pkg/fleet/installer/setup/defaultscript/default_script.go +++ b/pkg/fleet/installer/setup/defaultscript/default_script.go @@ -271,7 +271,7 @@ func exitOnUnsupportedEnvVars(envVars ...string) error { func telemetrySupportedEnvVars(s *common.Setup, envVars ...string) { for _, envVar := range envVars { - s.Span.SetTag(fmt.Sprintf("env_var.%s", envVar), os.Getenv(envVar)) + s.Span.SetTag("env_var."+envVar, os.Getenv(envVar)) } } diff --git a/pkg/fleet/installer/telemetry/cmd_wrapper.go b/pkg/fleet/installer/telemetry/cmd_wrapper.go index 1d153f8594ab7b..488ecf1ecd85f1 100644 --- a/pkg/fleet/installer/telemetry/cmd_wrapper.go +++ b/pkg/fleet/installer/telemetry/cmd_wrapper.go @@ -23,7 +23,7 @@ type TracedCmd struct { // CommandContext runs a command using exec.CommandContext and adds telemetry func CommandContext(ctx context.Context, name string, args ...string) *TracedCmd { - span, _ := StartSpanFromContext(ctx, fmt.Sprintf("exec.%s", name)) + span, _ := StartSpanFromContext(ctx, "exec."+name) span.SetTag("name", name) span.SetTag("args", strings.Join(args, " ")) cmd := exec.CommandContext(ctx, name, args...) 
diff --git a/pkg/gohai/cpu/cpu_darwin.go b/pkg/gohai/cpu/cpu_darwin.go index 80faa87c17e68b..8cb4d732cb578c 100644 --- a/pkg/gohai/cpu/cpu_darwin.go +++ b/pkg/gohai/cpu/cpu_darwin.go @@ -7,7 +7,7 @@ package cpu import ( "errors" - "fmt" + "strconv" "github.com/DataDog/datadog-agent/pkg/gohai/utils" "golang.org/x/sys/unix" @@ -31,7 +31,7 @@ func getSysctlString(key string) utils.Value[string] { // type returned by sysctl is uint32, stored as string func getSysctlInt32String(key string) utils.Value[string] { - castFun := func(val uint32) string { return fmt.Sprintf("%d", val) } + castFun := func(val uint32) string { return strconv.FormatUint(uint64(val), 10) } return getSysctl(unix.SysctlUint32, castFun, key) } diff --git a/pkg/gohai/cpu/cpu_windows.go b/pkg/gohai/cpu/cpu_windows.go index 6c597de652b800..8271c590180f8a 100644 --- a/pkg/gohai/cpu/cpu_windows.go +++ b/pkg/gohai/cpu/cpu_windows.go @@ -12,7 +12,6 @@ package cpu */ import "C" import ( - "fmt" "regexp" "strconv" "strings" @@ -45,7 +44,7 @@ type cpuInfo struct { } func extract(caption, field string) string { - re := regexp.MustCompile(fmt.Sprintf("%s [0-9]* ", field)) + re := regexp.MustCompile(field + " [0-9]* ") matches := re.FindStringSubmatch(caption) if len(matches) > 0 { return strings.Split(matches[0], " ")[1] diff --git a/pkg/gohai/filesystem/filesystem_windows.go b/pkg/gohai/filesystem/filesystem_windows.go index a74efab2550cb5..cbfae65f8d87e7 100644 --- a/pkg/gohai/filesystem/filesystem_windows.go +++ b/pkg/gohai/filesystem/filesystem_windows.go @@ -6,6 +6,7 @@ package filesystem import ( + "strings" "unsafe" "golang.org/x/sys/windows" @@ -36,14 +37,14 @@ func convertWindowsStringList(winput []uint16) []string { // as would this func convertWindowsString(winput []uint16) string { - var retstring string + var builder strings.Builder for i := 0; i < len(winput); i++ { if winput[i] == 0 { break } - retstring += string(rune(winput[i])) + builder.WriteRune(rune(winput[i])) } - return retstring + return 
builder.String() } func getDiskSize(vol string) (size uint64, freespace uint64) { diff --git a/pkg/gohai/memory/memory_linux.go b/pkg/gohai/memory/memory_linux.go index 822a082fc4aee7..aa9a15670e108a 100644 --- a/pkg/gohai/memory/memory_linux.go +++ b/pkg/gohai/memory/memory_linux.go @@ -7,6 +7,7 @@ package memory import ( "bufio" + "errors" "fmt" "io" "os" @@ -29,8 +30,8 @@ func parseMemoryInfo(reader io.Reader) (totalBytes utils.Value[uint64], swapTota return } - totalBytes = utils.NewErrorValue[uint64](fmt.Errorf("'MemTotal' not found in /proc/meminfo")) - swapTotalKb = utils.NewErrorValue[uint64](fmt.Errorf("'SwapTotal' not found in /proc/meminfo")) + totalBytes = utils.NewErrorValue[uint64](errors.New("'MemTotal' not found in /proc/meminfo")) + swapTotalKb = utils.NewErrorValue[uint64](errors.New("'SwapTotal' not found in /proc/meminfo")) for _, line := range lines { key, valUnit, found := strings.Cut(line, ":") if !found { diff --git a/pkg/gohai/utils/value_test.go b/pkg/gohai/utils/value_test.go index 876030dd0e9374..0219a2a61a99ad 100644 --- a/pkg/gohai/utils/value_test.go +++ b/pkg/gohai/utils/value_test.go @@ -6,7 +6,6 @@ package utils import ( "errors" - "fmt" "testing" "github.com/stretchr/testify/require" @@ -33,7 +32,7 @@ func TestNewErrorValue(t *testing.T) { } func TestNewValueFrom(t *testing.T) { - myerr := fmt.Errorf("yet another error") + myerr := errors.New("yet another error") value := NewValueFrom(42, myerr) _, err := value.Value() require.ErrorIs(t, err, myerr) @@ -48,7 +47,7 @@ func TestError(t *testing.T) { value := NewValue(1) require.NoError(t, value.Error()) - myerr := fmt.Errorf("again an error !?") + myerr := errors.New("again an error !?") errorValue := NewErrorValue[int](myerr) require.ErrorIs(t, myerr, errorValue.Error()) } @@ -58,6 +57,6 @@ func TestValueOrDefault(t *testing.T) { val := value.ValueOrDefault() require.Equal(t, 1, val) - value = NewErrorValue[int](fmt.Errorf("still an error")) + value = 
NewErrorValue[int](errors.New("still an error")) require.Empty(t, value.ValueOrDefault()) } diff --git a/pkg/gpu/cgroups_test.go b/pkg/gpu/cgroups_test.go index 9bb073d8ee39ee..c4a72cd9e3952d 100644 --- a/pkg/gpu/cgroups_test.go +++ b/pkg/gpu/cgroups_test.go @@ -295,7 +295,7 @@ func TestDetachAllDeviceCgroupPrograms(t *testing.T) { devnull.Close() } - testCgroupName := fmt.Sprintf("test-detach-device-programs-%s", utils.RandString(10)) + testCgroupName := "test-detach-device-programs-" + utils.RandString(10) testCgroupPath := filepath.Join("/sys/fs/cgroup", testCgroupName) moveSelfToCgroup(t, testCgroupName) @@ -382,7 +382,7 @@ func TestConfigureCgroupV1DeviceAllow(t *testing.T) { devnull.Close() } - testCgroupName := fmt.Sprintf("test-cgroup-device-allow-%s", utils.RandString(10)) + testCgroupName := "test-cgroup-device-allow-" + utils.RandString(10) moveSelfToCgroup(t, testCgroupName) // Test that /dev/null is still accessible after moving to cgroup @@ -428,7 +428,7 @@ func TestGetAbsoluteCgroupForProcess(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, currentCgroup) // Cgroup could be anything, but it should not be empty - testCgroupName := fmt.Sprintf("test-get-cgroup-for-process-%s", utils.RandString(10)) + testCgroupName := "test-get-cgroup-for-process-" + utils.RandString(10) moveSelfToCgroup(t, testCgroupName) currentCgroup, err = getAbsoluteCgroupForProcess("", "/", uint32(os.Getpid()), uint32(os.Getpid()), containerdcgroups.Mode()) @@ -448,13 +448,13 @@ func TestGetAbsoluteCgroupV1ForProcess(t *testing.T) { } mainCgroup := testutil.FakeCgroup{ - Name: fmt.Sprintf("test-parent-cgroup-%s", utils.RandString(10)), + Name: "test-parent-cgroup-" + utils.RandString(10), PIDs: []int{mainPid}, Parent: &rootCgroup, } siblingCgroup := testutil.FakeCgroup{ - Name: fmt.Sprintf("test-sibling-cgroup-%s", utils.RandString(10)), + Name: "test-sibling-cgroup-" + utils.RandString(10), PIDs: []int{siblingPid}, Parent: &rootCgroup, } @@ -498,12 +498,12 @@ func 
TestGetAbsoluteCgroupV2ForProcessInsideContainer(t *testing.T) { } parentCgroup := testutil.FakeCgroup{ - Name: fmt.Sprintf("test-parent-cgroup-%s", utils.RandString(10)), + Name: "test-parent-cgroup-" + utils.RandString(10), PIDs: []int{}, } childCgroup := testutil.FakeCgroup{ - Name: fmt.Sprintf("test-child-cgroup-%s", utils.RandString(10)), + Name: "test-child-cgroup-" + utils.RandString(10), Parent: &parentCgroup, PIDs: []int{pid}, IsContainerRoot: true, @@ -511,7 +511,7 @@ func TestGetAbsoluteCgroupV2ForProcessInsideContainer(t *testing.T) { } siblingCgroup := testutil.FakeCgroup{ - Name: fmt.Sprintf("test-sibling-cgroup-%s", utils.RandString(10)), + Name: "test-sibling-cgroup-" + utils.RandString(10), Parent: &parentCgroup, PIDs: []int{siblingProc}, VisibleInContainerNamespace: true, diff --git a/pkg/gpu/containers/containers.go b/pkg/gpu/containers/containers.go index 0bec9ceaae2e45..4f8b5c4f689978 100644 --- a/pkg/gpu/containers/containers.go +++ b/pkg/gpu/containers/containers.go @@ -150,7 +150,7 @@ func findDeviceForResourceName(devices []ddnvml.Device, resourceID string) (ddnv physicalDevice, isPhysicalDevice := device.(*ddnvml.PhysicalDevice) _, isMigDevice := device.(*ddnvml.MIGDevice) if isMigDevice || (isPhysicalDevice && len(physicalDevice.MIGChildren) > 0) { - return nil, fmt.Errorf("MIG devices are not supported for GKE device plugin") + return nil, errors.New("MIG devices are not supported for GKE device plugin") } } diff --git a/pkg/gpu/cuda/cubin.go b/pkg/gpu/cuda/cubin.go index 32ae68bd04b60c..2a0cefd8dcb231 100644 --- a/pkg/gpu/cuda/cubin.go +++ b/pkg/gpu/cuda/cubin.go @@ -11,6 +11,7 @@ package cuda import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "regexp" @@ -179,7 +180,7 @@ func (cp *cubinParser) parseCubinElf(data []byte) error { // Hacks to be able to parse the ELF: the ELF version is not supported by the Go ELF parser, so we need to // trick it into thinking it's the old version. 
Check for boundaries first if len(data) <= elfVersionOffset { - return fmt.Errorf("invalid cubin data, too short") + return errors.New("invalid cubin data, too short") } data[elfVersionOffset] = 1 diff --git a/pkg/gpu/cuda/elf_test.go b/pkg/gpu/cuda/elf_test.go index 0c448542a1754a..1683e0c9233fe8 100644 --- a/pkg/gpu/cuda/elf_test.go +++ b/pkg/gpu/cuda/elf_test.go @@ -11,7 +11,6 @@ package cuda import ( - "fmt" "os" "path/filepath" "runtime" @@ -28,7 +27,7 @@ func TestLazySectionReader(t *testing.T) { require.NoError(t, err) libdir := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") - lib := filepath.Join(libdir, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + lib := filepath.Join(libdir, "libssl.so."+runtime.GOARCH) f, err := os.Open(lib) require.NoError(t, err) diff --git a/pkg/gpu/probe.go b/pkg/gpu/probe.go index f6e83d095458ae..673fe716214971 100644 --- a/pkg/gpu/probe.go +++ b/pkg/gpu/probe.go @@ -296,10 +296,10 @@ func (p *Probe) initCOREGPU(cfg *config.Config) error { func getAssetName(module string, debug bool) string { if debug { - return fmt.Sprintf("%s-debug.o", module) + return module + "-debug.o" } - return fmt.Sprintf("%s.o", module) + return module + ".o" } func (p *Probe) setupManager(buf io.ReaderAt, opts manager.Options) error { diff --git a/pkg/gpu/safenvml/errors.go b/pkg/gpu/safenvml/errors.go index c261a8e7b3b40e..50d86abd2e30ad 100644 --- a/pkg/gpu/safenvml/errors.go +++ b/pkg/gpu/safenvml/errors.go @@ -27,9 +27,9 @@ type NvmlAPIError struct { func (e *NvmlAPIError) Error() string { switch { case errors.Is(e.NvmlErrorCode, nvml.ERROR_FUNCTION_NOT_FOUND): - return fmt.Sprintf("%s symbol not found in NVML library", e.APIName) + return e.APIName + " symbol not found in NVML library" case errors.Is(e.NvmlErrorCode, nvml.ERROR_NOT_SUPPORTED): - return fmt.Sprintf("%s is not supported by the GPU or driver", e.APIName) + return e.APIName + " is not supported by the GPU or driver" default: return 
fmt.Sprintf("NVML API error for %s: %s", e.APIName, nvml.ErrorString(e.NvmlErrorCode)) } diff --git a/pkg/gpu/safenvml/lib.go b/pkg/gpu/safenvml/lib.go index 2cb5afdffee401..ba559deae62ebd 100644 --- a/pkg/gpu/safenvml/lib.go +++ b/pkg/gpu/safenvml/lib.go @@ -316,7 +316,7 @@ func (s *safeNvml) ensureInitWithOpts(nvmlNewFunc func(opts ...nvml.LibraryOptio lib := nvmlNewFunc(nvml.WithLibraryPath(libpath)) if lib == nil { - return fmt.Errorf("failed to create NVML library") + return errors.New("failed to create NVML library") } ret := lib.Init() diff --git a/pkg/gpu/stream_test.go b/pkg/gpu/stream_test.go index f8fa917fdd8f64..4b0dc847f9e67c 100644 --- a/pkg/gpu/stream_test.go +++ b/pkg/gpu/stream_test.go @@ -8,9 +8,9 @@ package gpu import ( - "fmt" "os" "path/filepath" + "strconv" "sync" "sync/atomic" "testing" @@ -379,7 +379,7 @@ func TestKernelLaunchEnrichment(t *testing.T) { if fatbinParsingEnabled { // Create all parent directories, // the path should match the procBinPath var value in cuda.AddKernelCacheEntry - tmpFoldersPath := filepath.Join(proc, fmt.Sprintf("%d", pid), "root") + tmpFoldersPath := filepath.Join(proc, strconv.FormatUint(pid, 10), "root") err := os.MkdirAll(tmpFoldersPath, 0755) require.NoError(t, err) filePath := filepath.Join(tmpFoldersPath, binPath) diff --git a/pkg/gpu/tags/tags_test.go b/pkg/gpu/tags/tags_test.go index 9479cad245eaec..e84c01aa6f8c3a 100644 --- a/pkg/gpu/tags/tags_test.go +++ b/pkg/gpu/tags/tags_test.go @@ -8,9 +8,9 @@ package tags import ( - "fmt" "os" "path/filepath" + "strconv" "testing" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -68,7 +68,7 @@ func TestGetTags(t *testing.T) { } // Create multiple dummy GPU entries for i := 0; i < 2; i++ { - if err := os.WriteFile(filepath.Join(nvidiaPath, fmt.Sprintf("%d", i)), []byte("dummy"), 0644); err != nil { + if err := os.WriteFile(filepath.Join(nvidiaPath, strconv.Itoa(i)), []byte("dummy"), 0644); err != nil { return err } } diff --git a/pkg/gpu/testutil/cgroups.go 
b/pkg/gpu/testutil/cgroups.go index c87b0d0d808102..333cc5dbcc8dc5 100644 --- a/pkg/gpu/testutil/cgroups.go +++ b/pkg/gpu/testutil/cgroups.go @@ -10,7 +10,6 @@ package testutil import ( "errors" - "fmt" "os" "path/filepath" "strconv" @@ -199,7 +198,7 @@ func addCgroupPidFiles(tb testing.TB, procfs string, cgroup *FakeCgroup, rootCgr filepath.Join(procfs, strconv.Itoa(pid), "task", strconv.Itoa(pid), "cgroup"), filepath.Join(procfs, strconv.Itoa(pid), "cgroup"), } - contents := fmt.Sprintf("0::/%s", cgroupRelativeToRoot) + contents := "0::/" + cgroupRelativeToRoot for _, targetFile := range targetFiles { tb.Logf("cgroup %s: %s written to %s", cgroup.Name, contents, targetFile) diff --git a/pkg/gpu/testutil/samplebins.go b/pkg/gpu/testutil/samplebins.go index bdc6df2cd88256..148826199ba495 100644 --- a/pkg/gpu/testutil/samplebins.go +++ b/pkg/gpu/testutil/samplebins.go @@ -80,7 +80,7 @@ type CudaSampleArgs struct { // Env returns the environment variables for the CUDA sample binary func (a *CudaSampleArgs) Env() []string { if a.CudaVisibleDevicesEnv != "" { - return []string{fmt.Sprintf("CUDA_VISIBLE_DEVICES=%s", a.CudaVisibleDevicesEnv)} + return []string{"CUDA_VISIBLE_DEVICES=" + a.CudaVisibleDevicesEnv} } return nil } @@ -215,7 +215,7 @@ func RunSampleInDocker(t testing.TB, sample Sample, image dockerImage) (int, str // RunSampleInDockerWithArgs executes the sample binary in a Docker container and returns the PID of the main container process, and the container ID func RunSampleInDockerWithArgs(t testing.TB, sample Sample, image dockerImage, args SampleArgs) (int, string) { builtBin := getBuiltSamplePath(t, sample) - containerName := fmt.Sprintf("gpu-testutil-%s", utils.RandString(10)) + containerName := "gpu-testutil-" + utils.RandString(10) scanner, err := procutil.NewScanner(sample.StartPattern, sample.FinishedPattern) require.NoError(t, err, "failed to create pattern scanner") diff --git a/pkg/inventory/software/collector_test.go 
b/pkg/inventory/software/collector_test.go index 3ace765504ca12..73e2ed331ad1f5 100644 --- a/pkg/inventory/software/collector_test.go +++ b/pkg/inventory/software/collector_test.go @@ -6,9 +6,10 @@ package software import ( - "fmt" - "github.com/stretchr/testify/assert" + "errors" "testing" + + "github.com/stretchr/testify/assert" ) // MockCollector implements Collector for testing @@ -78,7 +79,7 @@ func TestCollectorOrchestration(t *testing.T) { name: "Collector error handling - continues with other collectors", collectors: []Collector{ &MockCollector{ - err: fmt.Errorf("registry access denied"), + err: errors.New("registry access denied"), }, &MockCollector{ entries: map[string]*Entry{ @@ -93,12 +94,12 @@ func TestCollectorOrchestration(t *testing.T) { name: "Collector error handling - multiple errors", collectors: []Collector{ &MockCollector{ - err: fmt.Errorf("msi error"), + err: errors.New("msi error"), entries: map[string]*Entry{ "app1": {DisplayName: "MSI App", Version: "1.0", Source: "desktop"}, }, }, - &MockCollector{err: fmt.Errorf("registry error")}, + &MockCollector{err: errors.New("registry error")}, }, expectedEntryCount: 0, // No entries returned on error because the collector was skipped expectError: true, diff --git a/pkg/inventory/software/collector_windows_test.go b/pkg/inventory/software/collector_windows_test.go index 4b4e0e4754be96..17557ea8235e07 100644 --- a/pkg/inventory/software/collector_windows_test.go +++ b/pkg/inventory/software/collector_windows_test.go @@ -191,7 +191,7 @@ func TestTrimVersion(t *testing.T) { } for _, tt := range tests { - t.Run(fmt.Sprintf("input_%s", tt.input), func(t *testing.T) { + t.Run("input_"+tt.input, func(t *testing.T) { result := trimVersion(tt.input) assert.Equal(t, tt.expected, result, "trimVersion(%q) should return %q", tt.input, tt.expected) }) diff --git a/pkg/inventory/software/msi_collector.go b/pkg/inventory/software/msi_collector.go index ca75ac57e8d038..9ba61061bfc9cd 100644 --- 
a/pkg/inventory/software/msi_collector.go +++ b/pkg/inventory/software/msi_collector.go @@ -8,7 +8,7 @@ package software import ( - "fmt" + "errors" "golang.org/x/sys/windows" @@ -122,7 +122,7 @@ func getMsiProductInfo(productCode []uint16, propertiesToFetch []string) (*Entry name = properties[msiProductName] } if name == "" { - return nil, fmt.Errorf("no valid name found for product") + return nil, errors.New("no valid name found for product") } version := mappedProperties[displayVersion] diff --git a/pkg/jmxfetch/jmxfetch.go b/pkg/jmxfetch/jmxfetch.go index 3bb25a32842947..4e77401508dc0c 100644 --- a/pkg/jmxfetch/jmxfetch.go +++ b/pkg/jmxfetch/jmxfetch.go @@ -15,6 +15,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "syscall" "time" @@ -222,7 +223,7 @@ func (j *JMXFetch) Start(manage bool) error { default: dsdStatus := j.getDSDStatus() if dsdStatus == DSDStatusRunningUDSDatagram { - reporter = fmt.Sprintf("statsd:unix://%s", pkgconfigsetup.Datadog().GetString("dogstatsd_socket")) + reporter = "statsd:unix://" + pkgconfigsetup.Datadog().GetString("dogstatsd_socket") } else { // We always use UDP if we don't definitively detect UDS running, but we want to let the user know if we // actually detected that UDP should be running, or if we're just in fallback mode. 
@@ -295,7 +296,7 @@ func (j *JMXFetch) Start(manage bool) error { if err := os.MkdirAll(javaTmpDir, 0755); err != nil { log.Warnf("Failed to create jmxfetch temporary directory %s: %v", javaTmpDir, err) } else { - javaTmpDirOpt := fmt.Sprintf(" -Djava.io.tmpdir=%s", javaTmpDir) + javaTmpDirOpt := " -Djava.io.tmpdir=" + javaTmpDir javaOptions += javaTmpDirOpt } } @@ -324,15 +325,15 @@ func (j *JMXFetch) Start(manage bool) error { "-classpath", classpath, jmxMainClass, "--ipc_host", ipcHost, - "--ipc_port", fmt.Sprintf("%v", ipcPort), - "--check_period", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_check_period")), // Period of the main loop of jmxfetch in ms - "--thread_pool_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_thread_pool_size")), // Size for the JMXFetch thread pool - "--collection_timeout", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_collection_timeout")), // Timeout for metric collection in seconds - "--reconnection_timeout", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_reconnection_timeout")), // Timeout for instance reconnection in seconds - "--reconnection_thread_pool_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_reconnection_thread_pool_size")), // Size for the JMXFetch reconnection thread pool + "--ipc_port", strconv.Itoa(ipcPort), + "--check_period", strconv.Itoa(pkgconfigsetup.Datadog().GetInt("jmx_check_period")), // Period of the main loop of jmxfetch in ms + "--thread_pool_size", strconv.Itoa(pkgconfigsetup.Datadog().GetInt("jmx_thread_pool_size")), // Size for the JMXFetch thread pool + "--collection_timeout", strconv.Itoa(pkgconfigsetup.Datadog().GetInt("jmx_collection_timeout")), // Timeout for metric collection in seconds + "--reconnection_timeout", strconv.Itoa(pkgconfigsetup.Datadog().GetInt("jmx_reconnection_timeout")), // Timeout for instance reconnection in seconds + "--reconnection_thread_pool_size", 
strconv.Itoa(pkgconfigsetup.Datadog().GetInt("jmx_reconnection_thread_pool_size")), // Size for the JMXFetch reconnection thread pool "--log_level", jmxLogLevel, "--reporter", reporter, // Reporter to use - "--statsd_queue_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_queue_size")), // Dogstatsd client queue size to use + "--statsd_queue_size", strconv.Itoa(pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_queue_size")), // Dogstatsd client queue size to use ) if pkgconfigsetup.Datadog().GetBool("jmx_statsd_telemetry_enabled") { @@ -348,11 +349,11 @@ func (j *JMXFetch) Start(manage bool) error { } if bufSize := pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_buffer_size"); bufSize != 0 { - subprocessArgs = append(subprocessArgs, "--statsd_buffer_size", fmt.Sprintf("%d", bufSize)) + subprocessArgs = append(subprocessArgs, "--statsd_buffer_size", strconv.Itoa(bufSize)) } if socketTimeout := pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_socket_timeout"); socketTimeout != 0 { - subprocessArgs = append(subprocessArgs, "--statsd_socket_timeout", fmt.Sprintf("%d", socketTimeout)) + subprocessArgs = append(subprocessArgs, "--statsd_socket_timeout", strconv.Itoa(socketTimeout)) } if pkgconfigsetup.Datadog().GetBool("log_format_rfc3339") { @@ -366,13 +367,13 @@ func (j *JMXFetch) Start(manage bool) error { // set environment + token j.cmd.Env = append( os.Environ(), - fmt.Sprintf("SESSION_TOKEN=%s", j.ipcComp.GetAuthToken()), + "SESSION_TOKEN="+j.ipcComp.GetAuthToken(), ) // append JAVA_TOOL_OPTIONS to cmd Env javaToolOptions := pkgconfigsetup.Datadog().GetString("jmx_java_tool_options") if len(javaToolOptions) > 0 { - j.cmd.Env = append(j.cmd.Env, fmt.Sprintf("JAVA_TOOL_OPTIONS=%s", javaToolOptions)) + j.cmd.Env = append(j.cmd.Env, "JAVA_TOOL_OPTIONS="+javaToolOptions) } // forward the standard output to the Agent logger diff --git a/pkg/kubestatemetrics/store/store.go b/pkg/kubestatemetrics/store/store.go index 
abc46ce22cce0e..4fec808ca1db42 100644 --- a/pkg/kubestatemetrics/store/store.go +++ b/pkg/kubestatemetrics/store/store.go @@ -10,7 +10,7 @@ package store import ( - "fmt" + "errors" "sync" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -123,7 +123,7 @@ func (s *MetricsStore) Add(obj interface{}) error { func buildTags(metrics *metric.Metric) (map[string]string, error) { if len(metrics.LabelKeys) != len(metrics.LabelValues) { - return nil, fmt.Errorf("LabelKeys and LabelValues not same size") + return nil, errors.New("LabelKeys and LabelValues not same size") } tags := make(map[string]string, len(metrics.LabelValues)) for i, key := range metrics.LabelKeys { diff --git a/pkg/kubestatemetrics/store/store_test.go b/pkg/kubestatemetrics/store/store_test.go index 8acb6e72a32fff..6847b4926b4a15 100644 --- a/pkg/kubestatemetrics/store/store_test.go +++ b/pkg/kubestatemetrics/store/store_test.go @@ -8,7 +8,7 @@ package store import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -101,7 +101,7 @@ func TestBuildTags(t *testing.T) { LabelKeys: []string{"bar", "ole", "toolong"}, }, expected: map[string]string{}, - err: fmt.Errorf("LabelKeys and LabelValues not same size"), + err: errors.New("LabelKeys and LabelValues not same size"), }, } for _, test := range tests { diff --git a/pkg/languagedetection/languagemodels/containerlanguages.go b/pkg/languagedetection/languagemodels/containerlanguages.go index 73b9b084c6055b..58755126242f05 100644 --- a/pkg/languagedetection/languagemodels/containerlanguages.go +++ b/pkg/languagedetection/languagemodels/containerlanguages.go @@ -6,7 +6,6 @@ package languagemodels import ( - "fmt" "reflect" "sort" "strings" @@ -81,7 +80,7 @@ func (c ContainersLanguages) ToAnnotations() map[string]string { for container, langSet := range c { containerName := container.Name if container.Init { - containerName = fmt.Sprintf("init.%s", containerName) + containerName = "init." 
+ containerName } annotationKey := GetLanguageAnnotationKey(containerName) diff --git a/pkg/languagedetection/util/owners.go b/pkg/languagedetection/util/owners.go index a7915cd6258a2e..1a7b3a90c7aa11 100644 --- a/pkg/languagedetection/util/owners.go +++ b/pkg/languagedetection/util/owners.go @@ -6,11 +6,11 @@ package util import ( - "fmt" + "strings" + pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "k8s.io/apimachinery/pkg/runtime/schema" - "strings" ) const ( @@ -76,7 +76,7 @@ func GetGVR(namespacedOwnerRef *NamespacedOwnerReference) (schema.GroupVersionRe return schema.GroupVersionResource{}, err } - gvr := gv.WithResource(fmt.Sprintf("%ss", strings.ToLower(namespacedOwnerRef.Kind))) + gvr := gv.WithResource(strings.ToLower(namespacedOwnerRef.Kind) + "s") return gvr, nil } diff --git a/pkg/logs/client/http/destination.go b/pkg/logs/client/http/destination.go index 257057bd24b4fc..d1100ba41096d4 100644 --- a/pkg/logs/client/http/destination.go +++ b/pkg/logs/client/http/destination.go @@ -335,7 +335,7 @@ func (d *Destination) unconditionalSend(payload *message.Payload) (err error) { } req.Header.Set("DD-API-KEY", d.endpoint.GetAPIKey()) req.Header.Set("Content-Type", d.contentType) - req.Header.Set("User-Agent", fmt.Sprintf("datadog-agent/%s", version.AgentVersion)) + req.Header.Set("User-Agent", "datadog-agent/"+version.AgentVersion) if payload.Encoding != "" { req.Header.Set("Content-Encoding", payload.Encoding) @@ -476,7 +476,7 @@ func buildURL(endpoint config.Endpoint) string { if endpoint.Version == config.EPIntakeVersion2 && endpoint.TrackType != "" { url.Path = fmt.Sprintf("%s/api/v2/%s", endpoint.PathPrefix, endpoint.TrackType) } else { - url.Path = fmt.Sprintf("%s/v1/input", endpoint.PathPrefix) + url.Path = endpoint.PathPrefix + "/v1/input" } return url.String() } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go 
b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go index 94c14c5d529675..cbc203c98bf073 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go @@ -115,11 +115,11 @@ func printMatchUnderline(t *testing.T, context *messageContext, input string, ma if len(input) > maxLen { evalStr = input[:maxLen] } - dbgStr := "" + var dbgBuilder strings.Builder printChar := " " last := context.tokenIndicies[0] for i, idx := range context.tokenIndicies { - dbgStr += strings.Repeat(printChar, idx-last) + dbgBuilder.WriteString(strings.Repeat(printChar, idx-last)) if i == match.start { printChar = "^" } @@ -128,6 +128,6 @@ func printMatchUnderline(t *testing.T, context *messageContext, input string, ma } last = idx } - dbgStr += strings.Repeat(printChar, len(evalStr)-last) - fmt.Printf("\t\t\t%v\n", dbgStr) + dbgBuilder.WriteString(strings.Repeat(printChar, len(evalStr)-last)) + fmt.Printf("\t\t\t%v\n", dbgBuilder.String()) } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go b/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go index bf740d938a31aa..b2e53c901c6d2f 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go @@ -309,11 +309,11 @@ func tokenToString(token tokens.Token) string { // tokensToString converts a list of tokens to a debug string. 
func tokensToString(tokens []tokens.Token) string { - str := "" + var builder strings.Builder for _, t := range tokens { - str += tokenToString(t) + builder.WriteString(tokenToString(t)) } - return str + return builder.String() } // isMatch compares two sequences of tokens and returns true if they match within the diff --git a/pkg/logs/internal/decoder/line_parser_test.go b/pkg/logs/internal/decoder/line_parser_test.go index fba313e4ee6275..361275a2ce4486 100644 --- a/pkg/logs/internal/decoder/line_parser_test.go +++ b/pkg/logs/internal/decoder/line_parser_test.go @@ -7,7 +7,7 @@ package decoder import ( "bytes" - "fmt" + "errors" "strings" "testing" "time" @@ -64,7 +64,7 @@ func (u *MockFailingParser) Parse(input *message.Message) (*message.Message, err return msg, nil } msg := message.NewMessage(input.GetContent(), nil, "", 0) - return msg, fmt.Errorf("error") + return msg, errors.New("error") } func (u *MockFailingParser) SupportsPartialLine() bool { diff --git a/pkg/logs/internal/parsers/dockerfile/docker_file.go b/pkg/logs/internal/parsers/dockerfile/docker_file.go index 948d14dace70b2..7080fc777141ad 100644 --- a/pkg/logs/internal/parsers/dockerfile/docker_file.go +++ b/pkg/logs/internal/parsers/dockerfile/docker_file.go @@ -9,6 +9,7 @@ package dockerfile import ( "encoding/json" + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers" @@ -62,7 +63,7 @@ func (p *dockerFileFormat) Parse(msg *message.Message) (*message.Message, error) // Check if log is nil (e.g., when input is the JSON literal null) if log == nil { msg.Status = message.StatusInfo - return msg, fmt.Errorf("cannot parse docker message, invalid format: got null") + return msg, errors.New("cannot parse docker message, invalid format: got null") } var status string diff --git a/pkg/logs/internal/parsers/dockerstream/docker_stream_fuzz_test.go b/pkg/logs/internal/parsers/dockerstream/docker_stream_fuzz_test.go index 95e4bac225e039..576225e08dd646 100644 --- 
a/pkg/logs/internal/parsers/dockerstream/docker_stream_fuzz_test.go +++ b/pkg/logs/internal/parsers/dockerstream/docker_stream_fuzz_test.go @@ -40,23 +40,23 @@ func FuzzParseDockerStream(f *testing.F) { // Valid messages with headers for _, ts := range timestamps { // stdout messages - msg := fmt.Sprintf("%s valid log message", ts) + msg := ts + " valid log message" f.Add(append(createHeader(1, uint32(len(msg))), []byte(msg)...)) // stderr messages - msg = fmt.Sprintf("%s error log message", ts) + msg = ts + " error log message" f.Add(append(createHeader(2, uint32(len(msg))), []byte(msg)...)) // Empty content after timestamp - msg = fmt.Sprintf("%s ", ts) + msg = ts + " " f.Add(append(createHeader(1, uint32(len(msg))), []byte(msg)...)) } // TTY messages (no header) with valid timestamps for _, ts := range timestamps { - f.Add([]byte(fmt.Sprintf("%s tty message without header", ts))) - f.Add([]byte(ts)) // Just timestamp - f.Add([]byte(fmt.Sprintf("%s ", ts))) // Timestamp with space + f.Add([]byte(ts + " tty message without header")) + f.Add([]byte(ts)) // Just timestamp + f.Add([]byte(ts + " ")) // Timestamp with space } // Large messages that trigger partial handling diff --git a/pkg/logs/internal/parsers/kubernetes/kubernetes_fuzz_test.go b/pkg/logs/internal/parsers/kubernetes/kubernetes_fuzz_test.go index 5efa70f34e46b5..7d933ed05fd0cd 100644 --- a/pkg/logs/internal/parsers/kubernetes/kubernetes_fuzz_test.go +++ b/pkg/logs/internal/parsers/kubernetes/kubernetes_fuzz_test.go @@ -58,7 +58,7 @@ func FuzzParseKubernetes(f *testing.F) { // Very long content longContent := strings.Repeat("A", 10000) - f.Add([]byte(fmt.Sprintf("2018-09-20T11:54:11.753589172Z stdout F %s", longContent))) + f.Add([]byte("2018-09-20T11:54:11.753589172Z stdout F " + longContent)) // Unknown stream types f.Add([]byte("2018-09-20T11:54:11.753589172Z unknown F message")) diff --git a/pkg/logs/internal/util/moving_sum.go b/pkg/logs/internal/util/moving_sum.go index 
0118329b4da254..388b1281b14ad6 100644 --- a/pkg/logs/internal/util/moving_sum.go +++ b/pkg/logs/internal/util/moving_sum.go @@ -8,6 +8,7 @@ package util import ( "fmt" + "strconv" "sync" "time" @@ -99,6 +100,6 @@ func (ms *MovingSum) Info() []string { MovingSum := ms.MovingSum() return []string{ - fmt.Sprintf("%d", MovingSum), + strconv.FormatInt(MovingSum, 10), } } diff --git a/pkg/logs/launchers/container/tailerfactory/file.go b/pkg/logs/launchers/container/tailerfactory/file.go index 029b375515fe97..bb193a6e633af3 100644 --- a/pkg/logs/launchers/container/tailerfactory/file.go +++ b/pkg/logs/launchers/container/tailerfactory/file.go @@ -177,14 +177,14 @@ func (tf *factory) findDockerLogPath(containerID string) string { // and set it in place of the usual docker base path overridePath := pkgconfigsetup.Datadog().GetString("logs_config.docker_path_override") if len(overridePath) > 0 { - return filepath.Join(overridePath, "containers", containerID, fmt.Sprintf("%s-json.log", containerID)) + return filepath.Join(overridePath, "containers", containerID, containerID+"-json.log") } switch runtime.GOOS { case "windows": return filepath.Join( dockerLogsBasePathWin, "containers", containerID, - fmt.Sprintf("%s-json.log", containerID)) + containerID+"-json.log") default: // linux, darwin // this config flag provides temporary support for podman while it is // still recognized by AD as a "docker" runtime. 
@@ -202,7 +202,7 @@ func (tf *factory) findDockerLogPath(containerID string) string { } return filepath.Join( dockerLogsBasePathNix, "containers", containerID, - fmt.Sprintf("%s-json.log", containerID)) + containerID+"-json.log") } } diff --git a/pkg/logs/launchers/file/launcher_test.go b/pkg/logs/launchers/file/launcher_test.go index 5cdeb74bb6bf0e..e0b39b207a0ae5 100644 --- a/pkg/logs/launchers/file/launcher_test.go +++ b/pkg/logs/launchers/file/launcher_test.go @@ -190,8 +190,8 @@ func (suite *BaseLauncherTestSuite) SetupTest() { var err error suite.testDir = suite.setupResult.TestDirs[0] - suite.testPath = fmt.Sprintf("%s/launcher.log", suite.testDir) - suite.testRotatedPath = fmt.Sprintf("%s.1", suite.testPath) + suite.testPath = suite.testDir + "/launcher.log" + suite.testRotatedPath = suite.testPath + ".1" f, err := os.Create(suite.testPath) suite.Nil(err) @@ -490,7 +490,7 @@ func runLauncherScanStartNewTailerTest(t *testing.T, testDirs []string) { testDir := testDirs[i] // create launcher - path = fmt.Sprintf("%s/*.log", testDir) + path = testDir + "/*.log" launcher := createLauncher(t, launcherTestOptions{ openFilesLimit: 2, @@ -505,7 +505,7 @@ func runLauncherScanStartNewTailerTest(t *testing.T, testDirs []string) { defer status.Clear() // create file - path = fmt.Sprintf("%s/test.log", testDir) + path = testDir + "/test.log" file, err := os.Create(path) assert.Nil(t, err) @@ -533,7 +533,7 @@ func runLauncherScanStartNewTailerForEmptyFileTest(t *testing.T, testDirs []stri // Temporarily set the global config for this test // create launcher - path := fmt.Sprintf("%s/*.log", testDir) + path := testDir + "/*.log" // Create fingerprint config for this test fingerprintConfig := types.FingerprintConfig{ @@ -556,7 +556,7 @@ func runLauncherScanStartNewTailerForEmptyFileTest(t *testing.T, testDirs []stri defer status.Clear() // create empty file - _, err := os.Create(fmt.Sprintf("%s/test.log", testDir)) + _, err := os.Create(testDir + "/test.log") assert.Nil(t, 
err) launcher.resolveActiveTailers(launcher.fileProvider.FilesToTail(context.Background(), launcher.validatePodContainerID, launcher.activeSources, launcher.registry)) @@ -573,7 +573,7 @@ func runLauncherScanStartNewTailerWithOneLineTest(t *testing.T, testDirs []strin testDir := testDirs[0] // create launcher - path := fmt.Sprintf("%s/*.log", testDir) + path := testDir + "/*.log" openFilesLimit := 2 launcher := createLauncher(t, launcherTestOptions{ @@ -588,7 +588,7 @@ func runLauncherScanStartNewTailerWithOneLineTest(t *testing.T, testDirs []strin defer status.Clear() // create file - filePath := fmt.Sprintf("%s/test.log", testDir) + filePath := testDir + "/test.log" file, err := os.Create(filePath) assert.Nil(t, err) @@ -614,7 +614,7 @@ func runLauncherScanStartNewTailerWithLongLineTest(t *testing.T, testDirs []stri // Temporarily set the global config for this test // create launcher - path := fmt.Sprintf("%s/*.log", testDir) + path := testDir + "/*.log" openFilesLimit := 2 // Create fingerprint config for this test @@ -638,7 +638,7 @@ func runLauncherScanStartNewTailerWithLongLineTest(t *testing.T, testDirs []stri defer status.Clear() // create file - filePath := fmt.Sprintf("%s/test.log", testDir) + filePath := testDir + "/test.log" file, err := os.Create(filePath) assert.Nil(t, err) @@ -660,7 +660,7 @@ func TestLauncherScanStartNewTailerWithLongLine(t *testing.T) { func runLauncherWithConcurrentContainerTailerTest(t *testing.T, testDirs []string) { testDir := testDirs[0] - path := fmt.Sprintf("%s/container.log", testDir) + path := testDir + "/container.log" // create launcher openFilesLimit := 3 @@ -670,8 +670,8 @@ func runLauncherWithConcurrentContainerTailerTest(t *testing.T, testDirs []strin launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditorMock.NewMockRegistry() outputChan := launcher.pipelineProvider.NextPipelineChan() - firstSource := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: 
fmt.Sprintf("%s/*.log", testDir), TailingMode: "beginning", Identifier: "123456789"}) - secondSource := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/*.log", testDir), TailingMode: "beginning", Identifier: "987654321"}) + firstSource := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: testDir + "/*.log", TailingMode: "beginning", Identifier: "123456789"}) + secondSource := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: testDir + "/*.log", TailingMode: "beginning", Identifier: "987654321"}) // create/truncate file file, err := os.Create(path) @@ -717,10 +717,10 @@ func runLauncherTailFromTheBeginningTest(t *testing.T, testDirs []string, chmodF launcher.registry = auditorMock.NewMockRegistry() outputChan := launcher.pipelineProvider.NextPipelineChan() sources := []*sources.LogSource{ - sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/test.log", testDir), TailingMode: "beginning"}), - sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/container.log", testDir), TailingMode: "beginning", Identifier: "123456789"}), + sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: testDir + "/test.log", TailingMode: "beginning"}), + sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: testDir + "/container.log", TailingMode: "beginning", Identifier: "123456789"}), // Same file different container ID - sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/container.log", testDir), TailingMode: "beginning", Identifier: "987654321"}), + sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: testDir + "/container.log", TailingMode: "beginning", Identifier: "987654321"}), } for i, source := range sources { @@ -781,8 +781,8 @@ func runLauncherTailFromTheBeginningTest(t *testing.T, testDirs []string, chmodF func 
runLauncherSetTailTest(t *testing.T, testDirs []string) { testDir := testDirs[0] - path1 := fmt.Sprintf("%s/test.log", testDir) - path2 := fmt.Sprintf("%s/test2.log", testDir) + path1 := testDir + "/test.log" + path2 := testDir + "/test2.log" os.Create(path1) os.Create(path2) openFilesLimit := 2 @@ -807,7 +807,7 @@ func runLauncherSetTailTest(t *testing.T, testDirs []string) { func runLauncherConfigIdentifierTest(t *testing.T, testDirs []string) { testDir := testDirs[0] - path := fmt.Sprintf("%s/test.log", testDir) + path := testDir + "/test.log" os.Create(path) openFilesLimit := 2 @@ -830,20 +830,20 @@ func runLauncherScanWithTooManyFilesTest(t *testing.T, testDirs []string) { var path string testDir := testDirs[0] // creates files - path = fmt.Sprintf("%s/1.log", testDir) + path = testDir + "/1.log" _, err := os.Create(path) assert.Nil(t, err) - path = fmt.Sprintf("%s/2.log", testDir) + path = testDir + "/2.log" _, err = os.Create(path) assert.Nil(t, err) - path = fmt.Sprintf("%s/3.log", testDir) + path = testDir + "/3.log" _, err = os.Create(path) assert.Nil(t, err) // create launcher - path = fmt.Sprintf("%s/*.log", testDir) + path = testDir + "/*.log" openFilesLimit := 2 launcher := createLauncher(t, launcherTestOptions{ @@ -863,7 +863,7 @@ func runLauncherScanWithTooManyFilesTest(t *testing.T, testDirs []string) { // Confirm that all of the files have been keepalive'd even if they are not tailed assert.Equal(t, 3, len(launcher.registry.(*auditorMock.Registry).KeepAlives)) - path = fmt.Sprintf("%s/2.log", testDir) + path = testDir + "/2.log" err = os.Remove(path) assert.Nil(t, err) @@ -873,7 +873,7 @@ func runLauncherScanWithTooManyFilesTest(t *testing.T, testDirs []string) { func runLauncherUpdatesSourceForExistingTailerTest(t *testing.T, testDirs []string) { testDir := testDirs[0] - path := fmt.Sprintf("%s/*.log", testDir) + path := testDir + "/*.log" os.Create(path) openFilesLimit := 2 @@ -949,7 +949,7 @@ func runLauncherScanRecentFilesWithRemovalTest(t 
*testing.T, testDirs []string) } launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditorMock.NewMockRegistry() - logDirectory := fmt.Sprintf("%s/*.log", testDir) + logDirectory := testDir + "/*.log" source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() @@ -1005,7 +1005,7 @@ func runLauncherScanRecentFilesWithNewFilesTest(t *testing.T, testDirs []string) }) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditorMock.NewMockRegistry() - logDirectory := fmt.Sprintf("%s/*.log", testDir) + logDirectory := testDir + "/*.log" source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() @@ -1065,7 +1065,7 @@ func runLauncherFileRotationTest(t *testing.T, testDirs []string) { }) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditorMock.NewMockRegistry() - logDirectory := fmt.Sprintf("%s/*.log", testDir) + logDirectory := testDir + "/*.log" source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() @@ -1129,7 +1129,7 @@ func runLauncherFileDetectionSingleScanTest(t *testing.T, testDirs []string) { }) launcher.pipelineProvider = mock.NewMockProvider() launcher.registry = auditorMock.NewMockRegistry() - logDirectory := fmt.Sprintf("%s/*.log", testDir) + logDirectory := testDir + "/*.log" source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() diff --git a/pkg/logs/launchers/file/provider/file_provider_test.go b/pkg/logs/launchers/file/provider/file_provider_test.go index 00ee1983ba3bf2..a471b236409420 100644 --- 
a/pkg/logs/launchers/file/provider/file_provider_test.go +++ b/pkg/logs/launchers/file/provider/file_provider_test.go @@ -91,31 +91,31 @@ func (suite *ProviderTestSuite) SetupTest() { suite.testDir = suite.T().TempDir() // Create directory tree: - path := fmt.Sprintf("%s/1", suite.testDir) + path := suite.testDir + "/1" err = os.Mkdir(path, os.ModePerm) suite.Nil(err) - path = fmt.Sprintf("%s/1/1.log", suite.testDir) + path = suite.testDir + "/1/1.log" _, err = os.Create(path) suite.Nil(err) - path = fmt.Sprintf("%s/1/2.log", suite.testDir) + path = suite.testDir + "/1/2.log" _, err = os.Create(path) suite.Nil(err) - path = fmt.Sprintf("%s/1/3.log", suite.testDir) + path = suite.testDir + "/1/3.log" _, err = os.Create(path) suite.Nil(err) - path = fmt.Sprintf("%s/2", suite.testDir) + path = suite.testDir + "/2" err = os.Mkdir(path, os.ModePerm) suite.Nil(err) - path = fmt.Sprintf("%s/2/1.log", suite.testDir) + path = suite.testDir + "/2/1.log" _, err = os.Create(path) suite.Nil(err) - path = fmt.Sprintf("%s/2/2.log", suite.testDir) + path = suite.testDir + "/2/2.log" _, err = os.Create(path) suite.Nil(err) } @@ -125,7 +125,7 @@ func (suite *ProviderTestSuite) TearDownTest() { } func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFile() { - path := fmt.Sprintf("%s/1/1.log", suite.testDir) + path := suite.testDir + "/1/1.log" fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) util.CreateSources(logSources) @@ -133,14 +133,14 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFile() { suite.Equal(1, len(files)) suite.False(files[0].IsWildcardPath) - suite.Equal(fmt.Sprintf("%s/1/1.log", suite.testDir), files[0].Path) + suite.Equal(suite.testDir+"/1/1.log", files[0].Path) suite.Equal(make([]string, 0), logSources[0].Messages.GetMessages()) } func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromDirectory() { mockConfig := configmock.New(suite.T()) - path := 
fmt.Sprintf("%s/1/*.log", suite.testDir) + path := suite.testDir + "/1/*.log" fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) status.InitStatus(mockConfig, util.CreateSources(logSources)) @@ -150,9 +150,9 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromDirectory() { suite.True(files[0].IsWildcardPath) suite.True(files[1].IsWildcardPath) suite.True(files[2].IsWildcardPath) - suite.Equal(fmt.Sprintf("%s/1/3.log", suite.testDir), files[0].Path) - suite.Equal(fmt.Sprintf("%s/1/2.log", suite.testDir), files[1].Path) - suite.Equal(fmt.Sprintf("%s/1/1.log", suite.testDir), files[2].Path) + suite.Equal(suite.testDir+"/1/3.log", files[0].Path) + suite.Equal(suite.testDir+"/1/2.log", files[1].Path) + suite.Equal(suite.testDir+"/1/1.log", files[2].Path) suite.Equal([]string{"3 files tailed out of 3 files matching"}, logSources[0].Messages.GetMessages()) suite.Equal( []string{ @@ -165,7 +165,7 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromDirectory() { func (suite *ProviderTestSuite) TestCollectFilesWildcardFlag() { // with wildcard - path := fmt.Sprintf("%s/1/*.log", suite.testDir) + path := suite.testDir + "/1/*.log" fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) files, err := fileProvider.CollectFiles(logSources[0]) @@ -176,7 +176,7 @@ func (suite *ProviderTestSuite) TestCollectFilesWildcardFlag() { // without wildcard - path = fmt.Sprintf("%s/1/1.log", suite.testDir) + path = suite.testDir + "/1/1.log" fileProvider = NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources = suite.newLogSources(path) files, err = fileProvider.CollectFiles(logSources[0]) @@ -187,7 +187,7 @@ func (suite *ProviderTestSuite) TestCollectFilesWildcardFlag() { } func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromAnyDirectoryWithRightPermissions() { - path := fmt.Sprintf("%s/*/*1.log", suite.testDir) + 
path := suite.testDir + "/*/*1.log" fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) util.CreateSources(logSources) @@ -196,15 +196,15 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromAnyDirectoryWi suite.Equal(2, len(files)) suite.True(files[0].IsWildcardPath) suite.True(files[1].IsWildcardPath) - suite.Equal(fmt.Sprintf("%s/2/1.log", suite.testDir), files[0].Path) - suite.Equal(fmt.Sprintf("%s/1/1.log", suite.testDir), files[1].Path) + suite.Equal(suite.testDir+"/2/1.log", files[0].Path) + suite.Equal(suite.testDir+"/1/1.log", files[1].Path) suite.Equal([]string{"2 files tailed out of 2 files matching"}, logSources[0].Messages.GetMessages()) } func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFileWithWildcard() { mockConfig := configmock.New(suite.T()) - path := fmt.Sprintf("%s/1/?.log", suite.testDir) + path := suite.testDir + "/1/?.log" fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) status.InitStatus(mockConfig, util.CreateSources(logSources)) @@ -214,9 +214,9 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFileWithWildcard() suite.True(files[0].IsWildcardPath) suite.True(files[1].IsWildcardPath) suite.True(files[2].IsWildcardPath) - suite.Equal(fmt.Sprintf("%s/1/3.log", suite.testDir), files[0].Path) - suite.Equal(fmt.Sprintf("%s/1/2.log", suite.testDir), files[1].Path) - suite.Equal(fmt.Sprintf("%s/1/1.log", suite.testDir), files[2].Path) + suite.Equal(suite.testDir+"/1/3.log", files[0].Path) + suite.Equal(suite.testDir+"/1/2.log", files[1].Path) + suite.Equal(suite.testDir+"/1/1.log", files[2].Path) suite.Equal([]string{"3 files tailed out of 3 files matching"}, logSources[0].Messages.GetMessages()) suite.Equal( []string{ @@ -228,7 +228,7 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFileWithWildcard() func (suite *ProviderTestSuite) TestWildcardPathsAreSorted() { 
filesLimit := 6 - path := fmt.Sprintf("%s/*/*.log", suite.testDir) + path := suite.testDir + "/*/*.log" fileProvider := NewFileProvider(filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) files := fileProvider.FilesToTail(context.Background(), true, logSources, auditor.NewMockAuditor()) @@ -236,17 +236,17 @@ func (suite *ProviderTestSuite) TestWildcardPathsAreSorted() { for i := 0; i < len(files); i++ { suite.Assert().True(files[i].IsWildcardPath) } - suite.Equal(fmt.Sprintf("%s/1/3.log", suite.testDir), files[0].Path) - suite.Equal(fmt.Sprintf("%s/2/2.log", suite.testDir), files[1].Path) - suite.Equal(fmt.Sprintf("%s/1/2.log", suite.testDir), files[2].Path) - suite.Equal(fmt.Sprintf("%s/2/1.log", suite.testDir), files[3].Path) - suite.Equal(fmt.Sprintf("%s/1/1.log", suite.testDir), files[4].Path) + suite.Equal(suite.testDir+"/1/3.log", files[0].Path) + suite.Equal(suite.testDir+"/2/2.log", files[1].Path) + suite.Equal(suite.testDir+"/1/2.log", files[2].Path) + suite.Equal(suite.testDir+"/2/1.log", files[3].Path) + suite.Equal(suite.testDir+"/1/1.log", files[4].Path) } func (suite *ProviderTestSuite) TestNumberOfFilesToTailDoesNotExceedLimit() { mockConfig := configmock.New(suite.T()) - path := fmt.Sprintf("%s/*/*.log", suite.testDir) + path := suite.testDir + "/*/*.log" fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) status.InitStatus(mockConfig, util.CreateSources(logSources)) @@ -266,8 +266,8 @@ func (suite *ProviderTestSuite) TestAllWildcardPathsAreUpdated() { filesLimit := 2 fileProvider := NewFileProvider(filesLimit, WildcardUseFileName) logSources := []*sources.LogSource{ - sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/1/*.log", suite.testDir)}), - sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/2/*.log", suite.testDir)}), + sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, 
Path: suite.testDir + "/1/*.log"}), + sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: suite.testDir + "/2/*.log"}), } status.InitStatus(mockConfig, util.CreateSources(logSources)) files := fileProvider.FilesToTail(context.Background(), true, logSources, auditor.NewMockAuditor()) @@ -287,9 +287,9 @@ func (suite *ProviderTestSuite) TestAllWildcardPathsAreUpdated() { status.Get(false).Warnings, ) - os.Remove(fmt.Sprintf("%s/1/2.log", suite.testDir)) - os.Remove(fmt.Sprintf("%s/1/3.log", suite.testDir)) - os.Remove(fmt.Sprintf("%s/2/2.log", suite.testDir)) + os.Remove(suite.testDir + "/1/2.log") + os.Remove(suite.testDir + "/1/3.log") + os.Remove(suite.testDir + "/2/2.log") files = fileProvider.FilesToTail(context.Background(), true, logSources, auditor.NewMockAuditor()) suite.Equal(2, len(files)) suite.Equal([]string{"1 files tailed out of 1 files matching"}, logSources[0].Messages.GetMessages()) @@ -302,7 +302,7 @@ func (suite *ProviderTestSuite) TestAllWildcardPathsAreUpdated() { status.Get(false).Warnings, ) - os.Remove(fmt.Sprintf("%s/2/1.log", suite.testDir)) + os.Remove(suite.testDir + "/2/1.log") files = fileProvider.FilesToTail(context.Background(), true, logSources, auditor.NewMockAuditor()) suite.Equal(1, len(files)) @@ -313,8 +313,8 @@ func (suite *ProviderTestSuite) TestAllWildcardPathsAreUpdated() { func (suite *ProviderTestSuite) TestExcludePath() { filesLimit := 6 - path := fmt.Sprintf("%s/*/*.log", suite.testDir) - excludePaths := []string{fmt.Sprintf("%s/2/*.log", suite.testDir)} + path := suite.testDir + "/*/*.log" + excludePaths := []string{suite.testDir + "/2/*.log"} fileProvider := NewFileProvider(filesLimit, WildcardUseFileName) logSources := []*sources.LogSource{ sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: path, ExcludePaths: excludePaths}), @@ -325,9 +325,9 @@ func (suite *ProviderTestSuite) TestExcludePath() { for i := 0; i < len(files); i++ { suite.Assert().True(files[i].IsWildcardPath) } 
- suite.Equal(fmt.Sprintf("%s/1/3.log", suite.testDir), files[0].Path) - suite.Equal(fmt.Sprintf("%s/1/2.log", suite.testDir), files[1].Path) - suite.Equal(fmt.Sprintf("%s/1/1.log", suite.testDir), files[2].Path) + suite.Equal(suite.testDir+"/1/3.log", files[0].Path) + suite.Equal(suite.testDir+"/1/2.log", files[1].Path) + suite.Equal(suite.testDir+"/1/1.log", files[2].Path) } func TestProviderTestSuite(t *testing.T) { diff --git a/pkg/logs/message/message.go b/pkg/logs/message/message.go index 953263965db58c..e564441ddfea98 100644 --- a/pkg/logs/message/message.go +++ b/pkg/logs/message/message.go @@ -8,7 +8,7 @@ package message import ( "encoding/json" - "fmt" + "errors" "time" "github.com/DataDog/datadog-agent/pkg/logs/sources" @@ -295,9 +295,9 @@ func (m *Message) Render() ([]byte, error) { case StateRendered: return m.content, nil case StateEncoded: - return m.content, fmt.Errorf("render call on an encoded message") + return m.content, errors.New("render call on an encoded message") default: - return m.content, fmt.Errorf("unknown message state for rendering") + return m.content, errors.New("unknown message state for rendering") } } @@ -390,12 +390,12 @@ func (m *MessageMetadata) RecordProcessingRule(ruleType string, ruleName string) // TruncatedReasonTag returns a tag with the reason for truncation. func TruncatedReasonTag(reason string) string { - return fmt.Sprintf("truncated:%s", reason) + return "truncated:" + reason } // MultiLineSourceTag returns a tag for multiline logs. func MultiLineSourceTag(source string) string { - return fmt.Sprintf("multiline:%s", source) + return "multiline:" + source } // IsMRF returns true if the payload should be sent to MRF endpoints. 
diff --git a/pkg/logs/processor/json.go b/pkg/logs/processor/json.go index d19b3f8406c939..3d34a94aa55060 100644 --- a/pkg/logs/processor/json.go +++ b/pkg/logs/processor/json.go @@ -7,6 +7,7 @@ package processor import ( "encoding/json" + "errors" "fmt" "time" @@ -38,7 +39,7 @@ type jsonPayload struct { // Encode encodes a message into a JSON byte array. func (j *jsonEncoder) Encode(msg *message.Message, hostname string) error { if msg.State != message.StateRendered { - return fmt.Errorf("message passed to encoder isn't rendered") + return errors.New("message passed to encoder isn't rendered") } ts := time.Now().UTC() diff --git a/pkg/logs/processor/json_serverless_init.go b/pkg/logs/processor/json_serverless_init.go index 7be9c6b1da267a..a4821b821b4710 100644 --- a/pkg/logs/processor/json_serverless_init.go +++ b/pkg/logs/processor/json_serverless_init.go @@ -7,6 +7,7 @@ package processor import ( "encoding/json" + "errors" "fmt" "time" @@ -37,7 +38,7 @@ type jsonServerlessInitPayload struct { // Encode encodes a message into a JSON byte array. func (j *jsonServerlessInitEncoder) Encode(msg *message.Message, hostname string) error { if msg.State != message.StateRendered { - return fmt.Errorf("message passed to encoder isn't rendered") + return errors.New("message passed to encoder isn't rendered") } ts := time.Now().UTC() diff --git a/pkg/logs/processor/proto.go b/pkg/logs/processor/proto.go index 573dca393dfba7..eb0d04e838abbf 100644 --- a/pkg/logs/processor/proto.go +++ b/pkg/logs/processor/proto.go @@ -6,6 +6,7 @@ package processor import ( + "errors" "fmt" "time" @@ -22,7 +23,7 @@ type protoEncoder struct{} // Encode encodes a message into a protobuf byte array. 
func (p *protoEncoder) Encode(msg *message.Message, hostname string) error { if msg.State != message.StateRendered { - return fmt.Errorf("message passed to encoder isn't rendered") + return errors.New("message passed to encoder isn't rendered") } log := &pb.Log{ diff --git a/pkg/logs/schedulers/ad/scheduler_test.go b/pkg/logs/schedulers/ad/scheduler_test.go index 389f6d62ab493c..33d065e8220179 100644 --- a/pkg/logs/schedulers/ad/scheduler_test.go +++ b/pkg/logs/schedulers/ad/scheduler_test.go @@ -248,7 +248,7 @@ func TestProcessLogPriorityOverManualConfig(t *testing.T) { LogsConfig: []byte(`[{"type":"file","path":"/var/log/app.log","service":"process-service"}]`), Provider: names.ProcessLog, Name: "process-config", - ServiceID: fmt.Sprintf("%s:///var/log/app.log", names.ProcessLog), + ServiceID: names.ProcessLog + ":///var/log/app.log", } // Clear events for the next test @@ -269,7 +269,7 @@ func TestProcessLogConfigAllowedWhenNoConflict(t *testing.T) { LogsConfig: []byte(`[{"type":"file","path":"/var/log/process.log","service":"process-service"}]`), Provider: names.ProcessLog, Name: "process-config", - ServiceID: fmt.Sprintf("%s:///var/log/process.log", names.ProcessLog), + ServiceID: names.ProcessLog + ":///var/log/process.log", } // Schedule the process_log config @@ -292,7 +292,7 @@ func TestNonFileTypeProcessLogConfigAllowed(t *testing.T) { LogsConfig: []byte(`[{"type":"tcp","service":"process-service"}]`), Provider: names.ProcessLog, Name: "process-config", - ServiceID: fmt.Sprintf("%s://test-service", names.ProcessLog), + ServiceID: names.ProcessLog + "://test-service", } // Schedule the process_log config diff --git a/pkg/logs/sources/config_source_test.go b/pkg/logs/sources/config_source_test.go index d66227cae62f59..6e51bc2e5c0039 100644 --- a/pkg/logs/sources/config_source_test.go +++ b/pkg/logs/sources/config_source_test.go @@ -6,7 +6,6 @@ package sources import ( - "fmt" "os" "testing" @@ -23,7 +22,7 @@ func CreateTestFile(tempDir string) *os.File { } 
// Specify the exact file name - filePath := fmt.Sprintf("%s/config.yaml", tempDir) + filePath := tempDir + "/config.yaml" // Create the file with the specified name tempFile, err := os.Create(filePath) diff --git a/pkg/logs/status/builder.go b/pkg/logs/status/builder.go index b27f98bfa2edc3..faa4a5b74c5074 100644 --- a/pkg/logs/status/builder.go +++ b/pkg/logs/status/builder.go @@ -8,7 +8,7 @@ package status import ( "expvar" - "fmt" + "strconv" "strings" "time" @@ -198,13 +198,13 @@ func (b *Builder) toDictionary(c *config.LogsConfig) map[string]interface{} { // getMetricsStatus exposes some aggregated metrics of the log agent on the agent status func (b *Builder) getMetricsStatus() map[string]string { var metrics = make(map[string]string) - metrics["LogsProcessed"] = fmt.Sprintf("%v", b.logsExpVars.Get("LogsProcessed").(*expvar.Int).Value()) - metrics["LogsSent"] = fmt.Sprintf("%v", b.logsExpVars.Get("LogsSent").(*expvar.Int).Value()) - metrics["BytesSent"] = fmt.Sprintf("%v", b.logsExpVars.Get("BytesSent").(*expvar.Int).Value()) - metrics["RetryCount"] = fmt.Sprintf("%v", b.logsExpVars.Get("RetryCount").(*expvar.Int).Value()) + metrics["LogsProcessed"] = strconv.FormatInt(b.logsExpVars.Get("LogsProcessed").(*expvar.Int).Value(), 10) + metrics["LogsSent"] = strconv.FormatInt(b.logsExpVars.Get("LogsSent").(*expvar.Int).Value(), 10) + metrics["BytesSent"] = strconv.FormatInt(b.logsExpVars.Get("BytesSent").(*expvar.Int).Value(), 10) + metrics["RetryCount"] = strconv.FormatInt(b.logsExpVars.Get("RetryCount").(*expvar.Int).Value(), 10) metrics["RetryTimeSpent"] = time.Duration(b.logsExpVars.Get("RetryTimeSpent").(*expvar.Int).Value()).String() - metrics["EncodedBytesSent"] = fmt.Sprintf("%v", b.logsExpVars.Get("EncodedBytesSent").(*expvar.Int).Value()) - metrics["LogsTruncated"] = fmt.Sprintf("%v", b.logsExpVars.Get("LogsTruncated").(*expvar.Int).Value()) + metrics["EncodedBytesSent"] = strconv.FormatInt(b.logsExpVars.Get("EncodedBytesSent").(*expvar.Int).Value(), 
10) + metrics["LogsTruncated"] = strconv.FormatInt(b.logsExpVars.Get("LogsTruncated").(*expvar.Int).Value(), 10) return metrics } diff --git a/pkg/logs/status/status_test.go b/pkg/logs/status/status_test.go index 32d906ddde45f1..762e1656bb02b7 100644 --- a/pkg/logs/status/status_test.go +++ b/pkg/logs/status/status_test.go @@ -6,8 +6,8 @@ package status import ( - "fmt" "math" + "strconv" "testing" "time" @@ -44,7 +44,7 @@ func TestSourceAreGroupedByIntegrations(t *testing.T) { case "bar": assert.Equal(t, 1, len(integration.Sources)) default: - assert.Fail(t, fmt.Sprintf("Expected foo or bar, got %s", integration.Name)) + assert.Fail(t, "Expected foo or bar, got "+integration.Name) } } } @@ -140,7 +140,7 @@ func TestStatusMetrics(t *testing.T) { metrics.LogsProcessed.Set(math.MaxInt64) metrics.LogsProcessed.Add(1) status = Get(false) - assert.Equal(t, fmt.Sprintf("%v", math.MinInt64), status.StatusMetrics["LogsProcessed"]) + assert.Equal(t, strconv.Itoa(math.MinInt64), status.StatusMetrics["LogsProcessed"]) } func TestStatusEndpoints(t *testing.T) { diff --git a/pkg/logs/status/utils/info.go b/pkg/logs/status/utils/info.go index c97c0f1fd73655..6fd8d2463576b4 100644 --- a/pkg/logs/status/utils/info.go +++ b/pkg/logs/status/utils/info.go @@ -7,7 +7,7 @@ package utils import ( - "fmt" + "strconv" "sync" "go.uber.org/atomic" @@ -63,7 +63,7 @@ func (c *CountInfo) InfoKey() string { // Info returns the info func (c *CountInfo) Info() []string { - return []string{fmt.Sprintf("%d", c.count.Load())} + return []string{strconv.FormatInt(c.count.Load(), 10)} } // MappedInfo collects multiple info messages with a unique key diff --git a/pkg/logs/status/utils/status.go b/pkg/logs/status/utils/status.go index f4c95344a207a9..d2e2b5ca7359c3 100644 --- a/pkg/logs/status/utils/status.go +++ b/pkg/logs/status/utils/status.go @@ -46,7 +46,7 @@ func (s *LogStatus) Error(err error) { s.mu.Lock() defer s.mu.Unlock() s.status = isError - s.err = fmt.Sprintf("Error: %s", err.Error()) + 
s.err = "Error: " + err.Error() } // IsPending returns whether the current status is not yet determined. diff --git a/pkg/logs/tailers/container/tailer.go b/pkg/logs/tailers/container/tailer.go index a3081ebee71ab8..8115795d451863 100644 --- a/pkg/logs/tailers/container/tailer.go +++ b/pkg/logs/tailers/container/tailer.go @@ -190,7 +190,7 @@ func NewDockerTailer( // Identifier returns a string that uniquely identifies a source func (t *Tailer) Identifier() string { - return fmt.Sprintf("docker:%s", t.ContainerID) + return "docker:" + t.ContainerID } // Stop stops the tailer from reading new container logs, diff --git a/pkg/logs/tailers/container/tailer_test.go b/pkg/logs/tailers/container/tailer_test.go index 5e539355a5363d..c2f88f058a149f 100644 --- a/pkg/logs/tailers/container/tailer_test.go +++ b/pkg/logs/tailers/container/tailer_test.go @@ -10,6 +10,7 @@ package container import ( "context" + "errors" "fmt" "io" "strings" @@ -162,7 +163,7 @@ func TestTailer_readForever(t *testing.T) { name: "The reader has been closed during the shut down process", newTailer: func() *Tailer { _, cancelFunc := context.WithCancel(context.Background()) - reader := NewTestReader("", fmt.Errorf("http: read on closed response body"), nil) + reader := NewTestReader("", errors.New("http: read on closed response body"), nil) tailer := NewTestTailer(reader, reader, cancelFunc) return tailer }, @@ -177,7 +178,7 @@ func TestTailer_readForever(t *testing.T) { name: "The agent is stopping", newTailer: func() *Tailer { _, cancelFunc := context.WithCancel(context.Background()) - reader := NewTestReader("", fmt.Errorf("use of closed network connection"), nil) + reader := NewTestReader("", errors.New("use of closed network connection"), nil) tailer := NewTestTailer(reader, reader, cancelFunc) return tailer }, @@ -195,7 +196,7 @@ func TestTailer_readForever(t *testing.T) { // init the fake reader with an io.EOF initialReader := NewTestReader("", io.EOF, nil) // then the new reader return by the 
unsafeReader client will return close network connection to simulate stop agent - connectionCloseReader := NewTestReader("", fmt.Errorf("use of closed network connection"), nil) + connectionCloseReader := NewTestReader("", errors.New("use of closed network connection"), nil) tailer := NewTestTailer(initialReader, connectionCloseReader, cancelFunc) return tailer }, @@ -210,7 +211,7 @@ func TestTailer_readForever(t *testing.T) { name: "default case with random error", newTailer: func() *Tailer { _, cancelFunc := context.WithCancel(context.Background()) - reader := NewTestReader("", fmt.Errorf("this is a random error"), nil) + reader := NewTestReader("", errors.New("this is a random error"), nil) tailer := NewTestTailer(reader, reader, cancelFunc) return tailer }, diff --git a/pkg/logs/tailers/file/file.go b/pkg/logs/tailers/file/file.go index d958127c5fe359..d4db25d6fe4f00 100644 --- a/pkg/logs/tailers/file/file.go +++ b/pkg/logs/tailers/file/file.go @@ -47,5 +47,5 @@ func (t *File) GetScanKey() string { // Identifier returns a unique identifier for this file func (t *File) Identifier() string { - return fmt.Sprintf("file:%s", t.Path) + return "file:" + t.Path } diff --git a/pkg/logs/tailers/file/fingerprint.go b/pkg/logs/tailers/file/fingerprint.go index 8ec3be73f80621..9bfceb30bd6431 100644 --- a/pkg/logs/tailers/file/fingerprint.go +++ b/pkg/logs/tailers/file/fingerprint.go @@ -7,6 +7,7 @@ package file import ( "bufio" + "errors" "fmt" "hash/crc64" "io" @@ -149,7 +150,7 @@ func (f *fingerprinterImpl) ComputeFingerprintFromHandle(osFile afero.File, fing } if osFile == nil { - return newInvalidFingerprint(nil), fmt.Errorf("osFile cannot be nil") + return newInvalidFingerprint(nil), errors.New("osFile cannot be nil") } // Get file path for logging purposes @@ -320,13 +321,13 @@ func (f *FingerprintConfigInfo) Info() []string { if f.config.FingerprintStrategy == types.FingerprintStrategyDisabled { return []string{ - fmt.Sprintf("Source: %s", source), + "Source: " + 
source, "Strategy: disabled", } } info := []string{ - fmt.Sprintf("Source: %s", source), + "Source: " + source, fmt.Sprintf("Strategy: %s", f.config.FingerprintStrategy), } diff --git a/pkg/logs/tailers/file/tailer.go b/pkg/logs/tailers/file/tailer.go index 04ea94c3d068bf..a65b7fb845c518 100644 --- a/pkg/logs/tailers/file/tailer.go +++ b/pkg/logs/tailers/file/tailer.go @@ -369,8 +369,8 @@ func (t *Tailer) readForever() { // buildTailerTags groups the file tag, directory (if wildcard path) and user tags func (t *Tailer) buildTailerTags() []string { tags := []string{ - fmt.Sprintf("filename:%s", filepath.Base(t.file.Path)), - fmt.Sprintf("dirname:%s", filepath.Dir(t.file.Path)), + "filename:" + filepath.Base(t.file.Path), + "dirname:" + filepath.Dir(t.file.Path), } return tags } diff --git a/pkg/logs/tailers/file/tailer_test.go b/pkg/logs/tailers/file/tailer_test.go index 16cf8e1ffd621e..1c43b73ffac7cb 100644 --- a/pkg/logs/tailers/file/tailer_test.go +++ b/pkg/logs/tailers/file/tailer_test.go @@ -272,7 +272,7 @@ func (suite *TailerTestSuite) TestWithBlanklines() { func (suite *TailerTestSuite) TestTailerIdentifier() { suite.tailer.StartFromBeginning() suite.Equal( - fmt.Sprintf("file:%s", filepath.Join(suite.testDir, "tailer.log")), + "file:"+filepath.Join(suite.testDir, "tailer.log"), suite.tailer.Identifier()) } diff --git a/pkg/logs/tailers/journald/tailer_util.go b/pkg/logs/tailers/journald/tailer_util.go index c66ef28827a2c5..af60500c1f1a1d 100644 --- a/pkg/logs/tailers/journald/tailer_util.go +++ b/pkg/logs/tailers/journald/tailer_util.go @@ -8,7 +8,6 @@ package journald import ( - "fmt" "math/rand" "strings" "time" @@ -61,5 +60,5 @@ func getDockerImageShortName(containerID string, tags []string) (string, bool) { } func getImageCacheKey(containerID string) string { - return fmt.Sprintf("logger.tailer.imagefor.%s", containerID) + return "logger.tailer.imagefor." 
+ containerID } diff --git a/pkg/logs/tailers/socket/tailer.go b/pkg/logs/tailers/socket/tailer.go index afa7e8b5112537..05db3f3d4dc47f 100644 --- a/pkg/logs/tailers/socket/tailer.go +++ b/pkg/logs/tailers/socket/tailer.go @@ -7,7 +7,6 @@ package socket import ( - "fmt" "io" "net" "strings" @@ -110,7 +109,7 @@ func (t *Tailer) readForever() { } else { ipAddressWithoutPort = ipAddress } - sourceHostTag := fmt.Sprintf("source_host:%s", ipAddressWithoutPort) + sourceHostTag := "source_host:" + ipAddressWithoutPort msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, sourceHostTag) } t.decoder.InputChan() <- msg diff --git a/pkg/logs/tailers/windowsevent/tailer_test.go b/pkg/logs/tailers/windowsevent/tailer_test.go index 6f94560cfc6aa7..686df7da0c8bb6 100644 --- a/pkg/logs/tailers/windowsevent/tailer_test.go +++ b/pkg/logs/tailers/windowsevent/tailer_test.go @@ -42,7 +42,7 @@ type ReadEventsSuite struct { func TestReadEventsSuite(t *testing.T) { for _, tiName := range eventlog_test.GetEnabledAPITesters() { - t.Run(fmt.Sprintf("%sAPI", tiName), func(t *testing.T) { + t.Run(tiName+"API", func(t *testing.T) { if tiName != "Windows" { t.Skipf("skipping %s: test interface not implemented", tiName) } @@ -94,7 +94,7 @@ func newtailer(evtapi evtapi.API, tailerconfig *Config, bookmark string, msgChan } else if source.Status.IsError() { return errors.New(source.Status.GetError()) } - return fmt.Errorf("start pending") + return errors.New("start pending") }, backoff.NewConstantBackOff(50*time.Millisecond)) if err != nil { return nil, fmt.Errorf("failed to start tailer: %w", err) @@ -186,11 +186,11 @@ func (s *ReadEventsSuite) TestRecoverFromBrokenSubscription() { s.ti.KillEventLogService(s.T()) err = backoff.Retry(func() error { if tailer.source.Status.IsSuccess() { - return fmt.Errorf("tailer is still running") + return errors.New("tailer is still running") } else if tailer.source.Status.IsError() { return nil } - return fmt.Errorf("start pending") + return errors.New("start 
pending") }, backoff.NewConstantBackOff(50*time.Millisecond)) s.Require().NoError(err, "tailer should catch the error and update the source status") fmt.Println(tailer.source.Status.GetError()) @@ -203,7 +203,7 @@ func (s *ReadEventsSuite) TestRecoverFromBrokenSubscription() { } else if tailer.source.Status.IsError() { return errors.New(tailer.source.Status.GetError()) } - return fmt.Errorf("start pending") + return errors.New("start pending") }, backoff.NewConstantBackOff(50*time.Millisecond)) s.Require().NoError(err, "tailer should auto restart after an error is resolved") diff --git a/pkg/logs/util/windowsevent/transform.go b/pkg/logs/util/windowsevent/transform.go index 5c5f53a0f5ac62..9ee2e1d81e0f39 100644 --- a/pkg/logs/util/windowsevent/transform.go +++ b/pkg/logs/util/windowsevent/transform.go @@ -9,6 +9,7 @@ package windowsevent import ( "bytes" "encoding/hex" + "errors" "fmt" "strings" @@ -254,7 +255,7 @@ func formatEventBinaryData(mv mxj.Map) error { // utf16decode converts ut16le bytes to utf8 bytes func convertUTF16ToUTF8(b []byte) ([]byte, error) { if len(b)%2 != 0 { - return nil, fmt.Errorf("length must be an even number") + return nil, errors.New("length must be an even number") } // UTF-16 little-endian (UTF-16LE) is the encoding standard in the Windows operating system. 
// https://learn.microsoft.com/en-us/globalization/encoding/transformations-of-unicode-code-points diff --git a/pkg/metrics/iterable_series_test.go b/pkg/metrics/iterable_series_test.go index 6d6e1dab630a23..b4c3261aa521b0 100644 --- a/pkg/metrics/iterable_series_test.go +++ b/pkg/metrics/iterable_series_test.go @@ -6,7 +6,6 @@ package metrics import ( - "fmt" "strconv" "strings" "testing" @@ -63,7 +62,7 @@ func TestIterableSeriesReceiverStopped(_ *testing.T) { func BenchmarkIterableSeries(b *testing.B) { for bufferSize := 1000; bufferSize <= 8000; bufferSize *= 2 { - b.Run(fmt.Sprintf("%v", bufferSize), func(b *testing.B) { + b.Run(strconv.Itoa(bufferSize), func(b *testing.B) { Serialize( NewIterableSeries(func(*Serie) {}, 100, bufferSize), NewIterableSketches(func(*SketchSeries) {}, 10, 2), diff --git a/pkg/metrics/rate.go b/pkg/metrics/rate.go index c880b13c0ca0e7..62088477c28c60 100644 --- a/pkg/metrics/rate.go +++ b/pkg/metrics/rate.go @@ -6,7 +6,7 @@ package metrics import ( - "fmt" + "errors" ) // Rate tracks the rate of a metric over 2 successive flushes @@ -32,7 +32,7 @@ func (r *Rate) flush(_ float64) ([]*Serie, error) { } if r.timestamp == r.previousTimestamp { - return []*Serie{}, fmt.Errorf("Rate was sampled twice at the same timestamp, can't compute a rate") + return []*Serie{}, errors.New("Rate was sampled twice at the same timestamp, can't compute a rate") } value, ts := (r.sample-r.previousSample)/(r.timestamp-r.previousTimestamp), r.timestamp @@ -40,7 +40,7 @@ func (r *Rate) flush(_ float64) ([]*Serie, error) { r.sample, r.timestamp = 0., 0. 
if value < 0 { - return []*Serie{}, fmt.Errorf("Rate value is negative, discarding it (the underlying counter may have been reset)") + return []*Serie{}, errors.New("Rate value is negative, discarding it (the underlying counter may have been reset)") } return []*Serie{ diff --git a/pkg/network/config/config_test.go b/pkg/network/config/config_test.go index df6111b8555a67..12413daf76a751 100644 --- a/pkg/network/config/config_test.go +++ b/pkg/network/config/config_test.go @@ -8,7 +8,6 @@ package config import ( - "fmt" "os" "runtime" "strconv" @@ -204,7 +203,7 @@ func TestMaxClosedConnectionsBuffered(t *testing.T) { t.Run("value set", func(t *testing.T) { mock.NewSystemProbe(t) - t.Setenv("DD_SYSTEM_PROBE_CONFIG_MAX_CLOSED_CONNECTIONS_BUFFERED", fmt.Sprintf("%d", maxTrackedConnections-1)) + t.Setenv("DD_SYSTEM_PROBE_CONFIG_MAX_CLOSED_CONNECTIONS_BUFFERED", strconv.FormatUint(uint64(maxTrackedConnections-1), 10)) cfg := New() require.Equal(t, maxTrackedConnections-1, cfg.MaxClosedConnectionsBuffered) @@ -223,7 +222,7 @@ func TestMaxFailedConnectionsBuffered(t *testing.T) { t.Run("value set", func(t *testing.T) { mock.NewSystemProbe(t) - t.Setenv("DD_NETWORK_CONFIG_MAX_FAILED_CONNECTIONS_BUFFERED", fmt.Sprintf("%d", maxTrackedConnections-1)) + t.Setenv("DD_NETWORK_CONFIG_MAX_FAILED_CONNECTIONS_BUFFERED", strconv.FormatUint(uint64(maxTrackedConnections-1), 10)) cfg := New() require.Equal(t, maxTrackedConnections-1, cfg.MaxFailedConnectionsBuffered) diff --git a/pkg/network/dns/monitor_linux.go b/pkg/network/dns/monitor_linux.go index 334d66d317845c..1ff498faae1add 100644 --- a/pkg/network/dns/monitor_linux.go +++ b/pkg/network/dns/monitor_linux.go @@ -8,6 +8,7 @@ package dns import ( + "errors" "fmt" "math" @@ -78,7 +79,7 @@ func NewReverseDNS(cfg *config.Config, _ telemetry.Component) (ReverseDNS, error filter, _ := p.GetProbe(manager.ProbeIdentificationPair{EBPFFuncName: probes.SocketDNSFilter, UID: probeUID}) if filter == nil { - return nil, fmt.Errorf("error 
retrieving socket filter") + return nil, errors.New("error retrieving socket filter") } if err = packetSrc.SetEbpf(filter); err != nil { diff --git a/pkg/network/driver/driver_windows.go b/pkg/network/driver/driver_windows.go index 79ae3227c8e431..185ee7c9ebe1dc 100644 --- a/pkg/network/driver/driver_windows.go +++ b/pkg/network/driver/driver_windows.go @@ -8,7 +8,6 @@ package driver import ( "errors" - "fmt" "sync" "go.uber.org/atomic" @@ -48,7 +47,7 @@ func Stop() error { return ErrDriverNotInitialized } if driverRef.inuse.Load() == 0 { - return fmt.Errorf("driver.Stop called without corresponding Start") + return errors.New("driver.Stop called without corresponding Start") } return driverRef.stop(false) } diff --git a/pkg/network/driver_interface_fail_test.go b/pkg/network/driver_interface_fail_test.go index 549e1065c66a54..ea42825559d4bf 100644 --- a/pkg/network/driver_interface_fail_test.go +++ b/pkg/network/driver_interface_fail_test.go @@ -8,6 +8,7 @@ package network import ( + "errors" "fmt" "testing" @@ -33,7 +34,7 @@ func (tdh *TestDriverHandleFail) ReadFile(p []byte, bytesRead *uint32, ol *windo if tdh.lastReturnBytes == 0 && tdh.lastError == windows.ERROR_MORE_DATA { // last time we returned empty but more...if caller does that twice in a row it's bad if len(p) <= tdh.lastBufferSize { - panic(fmt.Errorf("Consecutive calls")) + panic(errors.New("Consecutive calls")) } } } @@ -48,7 +49,7 @@ func (tdh *TestDriverHandleFail) GetWindowsHandle() windows.Handle { func (tdh *TestDriverHandleFail) DeviceIoControl(ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *windows.Overlapped) (err error) { fmt.Printf("Got test ioctl call") if ioControlCode != 0 { - return fmt.Errorf("wrong ioctl code") + return errors.New("wrong ioctl code") } return nil } diff --git a/pkg/network/ebpf/bpf_module.go b/pkg/network/ebpf/bpf_module.go index 8e4d71bff3248e..11d3e94f00cc3a 100644 --- 
a/pkg/network/ebpf/bpf_module.go +++ b/pkg/network/ebpf/bpf_module.go @@ -24,10 +24,10 @@ var telemetryMu sync.Mutex // ModuleFileName constructs the module file name based on the module name func ModuleFileName(moduleName string, debug bool) string { if debug { - return fmt.Sprintf("%s-debug.o", moduleName) + return moduleName + "-debug.o" } - return fmt.Sprintf("%s.o", moduleName) + return moduleName + ".o" } func readModule(bpfDir, moduleName string, debug bool) (bytecode.AssetReader, error) { diff --git a/pkg/network/encoding/marshal/modeler_test.go b/pkg/network/encoding/marshal/modeler_test.go index 599f8d6c4b2dc4..5e1d2f9a6fd45d 100644 --- a/pkg/network/encoding/marshal/modeler_test.go +++ b/pkg/network/encoding/marshal/modeler_test.go @@ -6,7 +6,7 @@ package marshal import ( - "fmt" + "errors" "strconv" "sync" "testing" @@ -78,7 +78,7 @@ func TestConnectionModelerError(t *testing.T) { }() kernel.RootNSPID = func() (int, error) { - return 0, fmt.Errorf("some RootNSPID error") + return 0, errors.New("some RootNSPID error") } mock.NewSystemProbe(t) diff --git a/pkg/network/ephemeral_windows.go b/pkg/network/ephemeral_windows.go index d5fcb904db6d06..43be8982f36e92 100644 --- a/pkg/network/ephemeral_windows.go +++ b/pkg/network/ephemeral_windows.go @@ -6,7 +6,7 @@ package network import ( - "fmt" + "errors" "os/exec" "regexp" "strconv" @@ -81,7 +81,7 @@ func parseNetshOutput(output string) (low, hi uint16, err error) { */ matches := netshRegexp.FindAllStringSubmatch(output, -1) if len(matches) != 2 { - return 0, 0, fmt.Errorf("could not parse output of netsh") + return 0, 0, errors.New("could not parse output of netsh") } portstart, err := strconv.Atoi(matches[0][1]) if err != nil { diff --git a/pkg/network/events/network_consumer_others.go b/pkg/network/events/network_consumer_others.go index 32b2e80c93c71d..ba7353f6bd3bc7 100644 --- a/pkg/network/events/network_consumer_others.go +++ b/pkg/network/events/network_consumer_others.go @@ -9,7 +9,7 @@ package 
events import ( - "fmt" + "errors" "github.com/DataDog/datadog-agent/pkg/eventmonitor" ) @@ -19,7 +19,7 @@ type NetworkConsumer struct{} // Start starts the event consumer (noop) func (n *NetworkConsumer) Start() error { - return fmt.Errorf("network consumer is only supported on linux") + return errors.New("network consumer is only supported on linux") } // Stop stops the event consumer (noop) @@ -32,5 +32,5 @@ func (n *NetworkConsumer) ID() string { // NewNetworkConsumer returns a new NetworkConsumer instance func NewNetworkConsumer(_ *eventmonitor.EventMonitor) (*NetworkConsumer, error) { - return nil, fmt.Errorf("network consumer is only supported on linux") + return nil, errors.New("network consumer is only supported on linux") } diff --git a/pkg/network/filter/packet_source_linux.go b/pkg/network/filter/packet_source_linux.go index d5dfbdb33941d3..d1fa910a4cdc34 100644 --- a/pkg/network/filter/packet_source_linux.go +++ b/pkg/network/filter/packet_source_linux.go @@ -9,6 +9,7 @@ package filter import ( + "errors" "fmt" "os" "reflect" @@ -83,7 +84,7 @@ func NewAFPacketSource(size int, opts ...interface{}) (*AFPacketSource, error) { case OptSnapLen: snapLen = int(o) if snapLen <= 0 || snapLen > 65536 { - return nil, fmt.Errorf("snap len should be between 0 and 65536") + return nil, errors.New("snap len should be between 0 and 65536") } default: return nil, fmt.Errorf("unknown option %+v", opt) @@ -125,11 +126,11 @@ func (p *AFPacketSource) SetEbpf(filter *manager.Probe) error { // Note the filter attachment itself is triggered by the ebpf.Manager f := reflect.ValueOf(p.TPacket).Elem().FieldByName("fd") if !f.IsValid() { - return fmt.Errorf("could not find fd field in TPacket object") + return errors.New("could not find fd field in TPacket object") } if !f.CanInt() { - return fmt.Errorf("fd TPacket field is not an int") + return errors.New("fd TPacket field is not an int") } filter.SocketFD = int(f.Int()) @@ -267,7 +268,7 @@ func afpacketComputeSize(targetSize, 
snaplen, pageSize int) (frameSize, blockSiz numBlocks = targetSize / blockSize if numBlocks == 0 { - return 0, 0, 0, fmt.Errorf("buffer size is too small") + return 0, 0, 0, errors.New("buffer size is too small") } blockSizeInc := blockSize diff --git a/pkg/network/go/bininspect/dwarf.go b/pkg/network/go/bininspect/dwarf.go index 203f5adb26dbe8..7db41378ca1f67 100644 --- a/pkg/network/go/bininspect/dwarf.go +++ b/pkg/network/go/bininspect/dwarf.go @@ -9,6 +9,7 @@ package bininspect import ( "debug/dwarf" + "errors" "fmt" "maps" "slices" @@ -197,13 +198,13 @@ func (d dwarfInspector) inspectSingleFunctionUsingDWARF(entry *dwarf.Entry) (Fun func (d dwarfInspector) getParameterLocationAtPC(parameterDIE *dwarf.Entry, pc uint64) (ParameterMetadata, error) { typeOffset, ok := parameterDIE.Val(dwarf.AttrType).(dwarf.Offset) if !ok { - return ParameterMetadata{}, fmt.Errorf("no type offset attribute in parameter entry") + return ParameterMetadata{}, errors.New("no type offset attribute in parameter entry") } // Find the location field on the entry locationField := parameterDIE.AttrField(dwarf.AttrLocation) if locationField == nil { - return ParameterMetadata{}, fmt.Errorf("no location field in parameter entry") + return ParameterMetadata{}, errors.New("no location field in parameter entry") } typ, err := dwarfutils.NewTypeFinder(d.dwarfData).FindTypeByOffset(typeOffset) @@ -324,7 +325,7 @@ func (d dwarfInspector) getLoclistEntry(offset int64, pc uint64) (*loclist.Entry } if loclist.Empty() { - return nil, fmt.Errorf("no loclist found for the given program counter") + return nil, errors.New("no loclist found for the given program counter") } // Use 0x0 as the static base @@ -337,5 +338,5 @@ func (d dwarfInspector) getLoclistEntry(offset int64, pc uint64) (*loclist.Entry return entry, nil } - return nil, fmt.Errorf("no loclist entry found") + return nil, errors.New("no loclist entry found") } diff --git a/pkg/network/go/dwarfutils/entry.go b/pkg/network/go/dwarfutils/entry.go 
index 4238617162d683..ee40f95e756458 100644 --- a/pkg/network/go/dwarfutils/entry.go +++ b/pkg/network/go/dwarfutils/entry.go @@ -7,6 +7,7 @@ package dwarfutils import ( "debug/dwarf" + "errors" "fmt" ) @@ -18,7 +19,7 @@ func GetChildLeafEntries(entryReader *dwarf.Reader, offset dwarf.Offset, tag dwa // Consume the first element from the reader to move it past the parent entry parentEntry, err := entryReader.Next() if err != nil { - return nil, fmt.Errorf("couldn't consume initial parent entry from entry reader") + return nil, errors.New("couldn't consume initial parent entry from entry reader") } if !parentEntry.Children { return nil, nil diff --git a/pkg/network/go/goversion/version.go b/pkg/network/go/goversion/version.go index 15a43d7a9a19ff..961bf61e103a3f 100644 --- a/pkg/network/go/goversion/version.go +++ b/pkg/network/go/goversion/version.go @@ -24,7 +24,7 @@ type GoVersion struct { // NewGoVersion returns a new GoVersion struct func NewGoVersion(rawVersion string) (GoVersion, error) { - version, ok := goversion.Parse(fmt.Sprintf("go%s", rawVersion)) + version, ok := goversion.Parse("go" + rawVersion) if !ok { return GoVersion{}, fmt.Errorf("couldn't parse go version %s", rawVersion) } diff --git a/pkg/network/go/rungo/install.go b/pkg/network/go/rungo/install.go index ef9242979a1f73..e1a262b8291b2a 100644 --- a/pkg/network/go/rungo/install.go +++ b/pkg/network/go/rungo/install.go @@ -62,13 +62,13 @@ type GoInstallation struct { // value as `i.InstallLocation` (if a custom value was given). 
func (i *GoInstallation) Install(ctx context.Context) (string, []byte, error) { if i.Version == "" { - return "", nil, fmt.Errorf("i.Version is required") + return "", nil, errors.New("i.Version is required") } if i.InstallGopath == "" { - return "", nil, fmt.Errorf("i.InstallGopath is required") + return "", nil, errors.New("i.InstallGopath is required") } if i.InstallGocache == "" { - return "", nil, fmt.Errorf("i.InstallGocache is required") + return "", nil, errors.New("i.InstallGocache is required") } // Run the `go install` command to compile/install the wrapper binary @@ -83,7 +83,7 @@ func (i *GoInstallation) Install(ctx context.Context) (string, []byte, error) { } // Build the path to the wrapper binary, and make sure it exists - wrapperPath := path.Join(i.InstallGopath, "bin", fmt.Sprintf("go%s", i.Version)) + wrapperPath := path.Join(i.InstallGopath, "bin", "go"+i.Version) absWrapperPath, err := filepath.Abs(wrapperPath) if err != nil { return "", nil, fmt.Errorf("error while resolving path to install wrapper binary (expected at %q): %w", wrapperPath, err) diff --git a/pkg/network/go/rungo/matrix/matrix.go b/pkg/network/go/rungo/matrix/matrix.go index 65585fa66b1887..d7528f20a75f33 100644 --- a/pkg/network/go/rungo/matrix/matrix.go +++ b/pkg/network/go/rungo/matrix/matrix.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "log" "os/exec" @@ -60,10 +61,10 @@ func (r *Runner) Run(ctx context.Context) error { return fmt.Errorf("cannot run with negative/zero Parallelism (%d)", r.Parallelism) } if r.GetCommand == nil { - return fmt.Errorf("GetCommand is required") + return errors.New("GetCommand is required") } if r.InstallDirectory == "" { - return fmt.Errorf("InstallDirectory is required") + return errors.New("InstallDirectory is required") } // If the install directory is not absolute, resolve it diff --git a/pkg/network/netlink/conntrack.go b/pkg/network/netlink/conntrack.go index 97bca1306d1de1..90b00d50f748b7 100644 --- 
a/pkg/network/netlink/conntrack.go +++ b/pkg/network/netlink/conntrack.go @@ -99,7 +99,7 @@ func (c *conntrack) Exists(conn *Con) (bool, error) { return true, nil } - return false, fmt.Errorf("no replies received from netlink call") + return false, errors.New("no replies received from netlink call") } func (c *conntrack) Close() error { diff --git a/pkg/network/netlink/conntrack_integration_test.go b/pkg/network/netlink/conntrack_integration_test.go index 059147bcbb5214..4218e8337133a8 100644 --- a/pkg/network/netlink/conntrack_integration_test.go +++ b/pkg/network/netlink/conntrack_integration_test.go @@ -258,7 +258,7 @@ func testConntrackExists(t *testing.T, laddrIP string, laddrPort int, proto stri ns netns.NsHandle }{ { - desc: fmt.Sprintf("net ns 0, origin exists, proto %s", proto), + desc: "net ns 0, origin exists, proto " + proto, c: Con{ Origin: newIPTuple(laddrIP, "2.2.2.4", uint16(laddrPort), 80, ipProto), }, @@ -266,7 +266,7 @@ func testConntrackExists(t *testing.T, laddrIP string, laddrPort int, proto stri ns: rootNs, }, { - desc: fmt.Sprintf("net ns 0, reply exists, proto %s", proto), + desc: "net ns 0, reply exists, proto " + proto, c: Con{ Reply: newIPTuple("2.2.2.4", laddrIP, 80, uint16(laddrPort), ipProto), }, @@ -274,7 +274,7 @@ func testConntrackExists(t *testing.T, laddrIP string, laddrPort int, proto stri ns: rootNs, }, { - desc: fmt.Sprintf("net ns 0, origin does not exist, proto %s", proto), + desc: "net ns 0, origin does not exist, proto " + proto, c: Con{ Origin: newIPTuple(laddrIP, "2.2.2.3", uint16(laddrPort), 80, ipProto), }, @@ -341,7 +341,7 @@ func testConntrackExists6(t *testing.T, laddrIP string, laddrPort int, proto str ns netns.NsHandle }{ { - desc: fmt.Sprintf("net ns 0, origin exists, proto %s", proto), + desc: "net ns 0, origin exists, proto " + proto, c: Con{ Origin: newIPTuple(laddrIP, "fd00::2", uint16(laddrPort), 80, ipProto), }, @@ -349,7 +349,7 @@ func testConntrackExists6(t *testing.T, laddrIP string, laddrPort int, proto 
str ns: rootNs, }, { - desc: fmt.Sprintf("net ns 0, reply exists, proto %s", proto), + desc: "net ns 0, reply exists, proto " + proto, c: Con{ Reply: newIPTuple("fd00::2", laddrIP, 80, uint16(laddrPort), ipProto), }, @@ -357,7 +357,7 @@ func testConntrackExists6(t *testing.T, laddrIP string, laddrPort int, proto str ns: rootNs, }, { - desc: fmt.Sprintf("net ns 0, origin does not exist, proto %s", proto), + desc: "net ns 0, origin does not exist, proto " + proto, c: Con{ Origin: newIPTuple(laddrIP, "fd00::1", uint16(laddrPort), 80, ipProto), }, diff --git a/pkg/network/netlink/consumer.go b/pkg/network/netlink/consumer.go index 757a4a52ca0148..4d61e01a06f535 100644 --- a/pkg/network/netlink/consumer.go +++ b/pkg/network/netlink/consumer.go @@ -653,7 +653,7 @@ func (c *Consumer) throttle(numMessages int) error { // we cannot recreate the socket and set a bpf filter on // kernels before 3.15, so we bail here log.Errorf("conntrack sampling not supported on kernel versions < 3.15. Please adjust system_probe_config.conntrack_rate_limit (currently set to %d) to accommodate higher conntrack update rate detected", c.targetRateLimit) - return fmt.Errorf("conntrack sampling rate not supported") + return errors.New("conntrack sampling rate not supported") } // Create new socket with the desired sampling rate diff --git a/pkg/network/netlink/decoding.go b/pkg/network/netlink/decoding.go index aff7d90466fd61..2d51b2e3c93176 100644 --- a/pkg/network/netlink/decoding.go +++ b/pkg/network/netlink/decoding.go @@ -9,6 +9,7 @@ package netlink import ( "encoding/binary" + "errors" "fmt" "net/netip" @@ -199,14 +200,14 @@ func (d *Decoder) unmarshalProto(t *ConTuple) error { func ipv4(b []byte) (netip.Addr, error) { if len(b) != 4 { - return netip.Addr{}, fmt.Errorf("invalid IPv4 size") + return netip.Addr{}, errors.New("invalid IPv4 size") } return netip.AddrFrom4([4]byte(b)), nil } func ipv6(b []byte) (netip.Addr, error) { if len(b) != 16 { - return netip.Addr{}, fmt.Errorf("invalid 
IPv6 size") + return netip.Addr{}, errors.New("invalid IPv6 size") } return netip.AddrFrom16([16]byte(b)), nil } diff --git a/pkg/network/netlink/decoding_test.go b/pkg/network/netlink/decoding_test.go index b4593425b4ddbe..e60aff14d8422f 100644 --- a/pkg/network/netlink/decoding_test.go +++ b/pkg/network/netlink/decoding_test.go @@ -9,7 +9,7 @@ package netlink import ( "encoding/binary" - "fmt" + "errors" "io" "net/netip" "os" @@ -101,7 +101,7 @@ func loadDumpData() ([]netlink.Message, error) { m := netlink.Message{Data: make([]byte, size)} _, err = io.ReadFull(f, m.Data) if err != nil { - return nil, fmt.Errorf("couldn't read enough data") + return nil, errors.New("couldn't read enough data") } messages = append(messages, m) diff --git a/pkg/network/netlink/testutil/conntrack.go b/pkg/network/netlink/testutil/conntrack.go index 016a2e1805b266..cd5a4b7731923c 100644 --- a/pkg/network/netlink/testutil/conntrack.go +++ b/pkg/network/netlink/testutil/conntrack.go @@ -9,6 +9,7 @@ package testutil import ( + "errors" "fmt" "math/rand" "os" @@ -39,7 +40,7 @@ func SetupDNAT(t *testing.T) { cmds := []string{ fmt.Sprintf("ip link add %s type dummy", linkName), - fmt.Sprintf("ip address add 1.1.1.1 broadcast + dev %s", linkName), + "ip address add 1.1.1.1 broadcast + dev " + linkName, fmt.Sprintf("ip link set %s up", linkName), "iptables -t nat -A OUTPUT --dest 2.2.2.2 -j DNAT --to-destination 1.1.1.1", "iptables -t nat -A PREROUTING --dest 3.3.3.3 -j DNAT --to-destination 1.1.1.1", @@ -58,8 +59,8 @@ func SetupSNAT(t *testing.T) string { cmds := []string{ fmt.Sprintf("ip link add %s type dummy", linkName), - fmt.Sprintf("ip address add 7.7.7.7 broadcast + dev %s", linkName), - fmt.Sprintf("ip address add 6.6.6.6 broadcast + dev %s", linkName), + "ip address add 7.7.7.7 broadcast + dev " + linkName, + "ip address add 6.6.6.6 broadcast + dev " + linkName, fmt.Sprintf("ip link set %s up", linkName), "iptables -t nat -A POSTROUTING -s 6.6.6.6/32 -j SNAT --to-source 7.7.7.7", } 
@@ -72,7 +73,7 @@ func SetupSNAT(t *testing.T) string { func teardownDNAT(t *testing.T, linkName string) { cmds := []string{ // tear down the testing interface, and iptables rule - fmt.Sprintf("ip link del %s", linkName), + "ip link del " + linkName, // clear out the conntrack table "conntrack -F", } @@ -101,7 +102,7 @@ func SetupDNAT6(t *testing.T) { nettestutil.IP6tablesSave(t) cmds := []string{ fmt.Sprintf("ip link add %s type dummy", linkName), - fmt.Sprintf("ip address add fd00::1 dev %s", linkName), + "ip address add fd00::1 dev " + linkName, fmt.Sprintf("ip link set %s up", linkName), fmt.Sprintf("%s/testdata/wait_if.sh %s", curDir, linkName), "ip -6 route add fd00::2 dev " + ifName, @@ -114,7 +115,7 @@ func SetupDNAT6(t *testing.T) { func teardownDNAT6(t *testing.T, ifName string, linkName string) { cmds := []string{ // tear down the testing interface, and iptables rule - fmt.Sprintf("ip link del %s", linkName), + "ip link del " + linkName, "ip -6 r del fd00::2 dev " + ifName, // clear out the conntrack table @@ -134,7 +135,7 @@ func SetupVethPair(tb testing.TB) (ns string) { cmds := []string{ "ip link add veth1 type veth peer name veth2", - fmt.Sprintf("ip link set veth2 netns %s", ns), + "ip link set veth2 netns " + ns, "ip address add 2.2.2.3/24 dev veth1", fmt.Sprintf("ip -n %s address add 2.2.2.4/24 dev veth2", ns), "ip link set veth1 up", @@ -164,7 +165,7 @@ func SetupVeth6Pair(t *testing.T) (ns string) { cmds := []string{ "ip link add veth1 type veth peer name veth2", - fmt.Sprintf("ip link set veth2 netns %s", ns), + "ip link set veth2 netns " + ns, "ip address add fd00::1/64 dev veth1", fmt.Sprintf("ip -n %s address add fd00::2/64 dev veth2", ns), "ip link set veth1 up", @@ -274,7 +275,7 @@ func AddNS(tb testing.TB) string { func _curDir() (string, error) { _, file, _, ok := runtime.Caller(0) if !ok { - return "", fmt.Errorf("unable to get current file build path") + return "", errors.New("unable to get current file build path") } buildDir := 
filepath.Dir(file) diff --git a/pkg/network/port.go b/pkg/network/port.go index fbfd3c4ccb45bd..02fb53e1c76f2a 100644 --- a/pkg/network/port.go +++ b/pkg/network/port.go @@ -9,9 +9,9 @@ package network import ( "errors" - "fmt" "os" "path" + "strconv" "strings" "time" @@ -66,7 +66,7 @@ func readState(procRoot string, paths []string, status int64) (map[PortMapping]u seen[ns] = struct{}{} for _, p := range paths { - ports, err := readProcNetWithStatus(path.Join(procRoot, fmt.Sprintf("%d", pid), p), status) + ports, err := readProcNetWithStatus(path.Join(procRoot, strconv.Itoa(pid), p), status) if err != nil { log.Errorf("error reading port state net ns ino=%d pid=%d path=%s status=%d", ns, pid, p, status) continue diff --git a/pkg/network/protocols/events/batch_consumer.go b/pkg/network/protocols/events/batch_consumer.go index c7d0a2b2fc8c73..2235847c448d65 100644 --- a/pkg/network/protocols/events/batch_consumer.go +++ b/pkg/network/protocols/events/batch_consumer.go @@ -81,12 +81,12 @@ func NewBatchConsumer[V any](proto string, ebpf *manager.Manager, callback func( handler := getHandler(proto) if handler == nil { - return nil, fmt.Errorf("unable to detect perf handler. perhaps you forgot to call events.Configure()?") + return nil, errors.New("unable to detect perf handler. 
perhaps you forgot to call events.Configure()?") } // setup telemetry metricGroup := telemetry.NewMetricGroup( - fmt.Sprintf("usm.%s", proto), + "usm."+proto, telemetry.OptStatsd, ) diff --git a/pkg/network/protocols/events/direct_consumer.go b/pkg/network/protocols/events/direct_consumer.go index 9ecec0aa19c842..acac5df7ed9063 100644 --- a/pkg/network/protocols/events/direct_consumer.go +++ b/pkg/network/protocols/events/direct_consumer.go @@ -62,7 +62,7 @@ func NewDirectConsumer[V any](proto string, callback func(*V), config *config.Co // setup telemetry metricGroup := telemetry.NewMetricGroup( - fmt.Sprintf("usm.%s", proto), + "usm."+proto, telemetry.OptStatsd, ) eventsCount := metricGroup.NewCounter("events_captured") diff --git a/pkg/network/protocols/http/telemetry.go b/pkg/network/protocols/http/telemetry.go index 4fa056f5d068a3..2a2a781643ca24 100644 --- a/pkg/network/protocols/http/telemetry.go +++ b/pkg/network/protocols/http/telemetry.go @@ -47,7 +47,7 @@ type Telemetry struct { // NewTelemetry returns a new Telemetry. func NewTelemetry(protocol string) *Telemetry { - metricGroup := libtelemetry.NewMetricGroup(fmt.Sprintf("usm.%s", protocol)) + metricGroup := libtelemetry.NewMetricGroup("usm." 
+ protocol) metricGroupJoiner := libtelemetry.NewMetricGroup(fmt.Sprintf("usm.%s.joiner", protocol)) return &Telemetry{ diff --git a/pkg/network/protocols/http/testutil/testutil.go b/pkg/network/protocols/http/testutil/testutil.go index 4b476e7dd1ad82..b1157d00572f7f 100644 --- a/pkg/network/protocols/http/testutil/testutil.go +++ b/pkg/network/protocols/http/testutil/testutil.go @@ -10,7 +10,7 @@ package testutil import ( "context" "crypto/tls" - "fmt" + "errors" "io" "net" "net/http" @@ -192,7 +192,7 @@ func GetCertsPaths() (string, string, error) { func CurDir() (string, error) { _, file, _, ok := runtime.Caller(1) if !ok { - return "", fmt.Errorf("unable to get current file build path") + return "", errors.New("unable to get current file build path") } buildDir := filepath.Dir(file) diff --git a/pkg/network/protocols/http2/model_test.go b/pkg/network/protocols/http2/model_test.go index d05b37c8cd2466..6c75e635828ab3 100644 --- a/pkg/network/protocols/http2/model_test.go +++ b/pkg/network/protocols/http2/model_test.go @@ -29,26 +29,26 @@ func TestHTTP2LongPath(t *testing.T) { }{ { name: "Long path with huffman with bigger out buffer", - rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), + rawPath: "/" + strings.Repeat("a", maxHTTP2Path+1), huffmanEnabled: true, }, { name: "Long path with huffman with shorter out buffer", - rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), - expectedPath: fmt.Sprintf("/%s", strings.Repeat("a", 19)), + rawPath: "/" + strings.Repeat("a", maxHTTP2Path+1), + expectedPath: "/" + strings.Repeat("a", 19), huffmanEnabled: true, outBufSize: 20, }, { name: "Long path without huffman with bigger out buffer", - rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), + rawPath: "/" + strings.Repeat("a", maxHTTP2Path+1), // The path is truncated to maxHTTP2Path (including the leading '/') - expectedPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path-1)), + expectedPath: "/" + strings.Repeat("a", 
maxHTTP2Path-1), }, { name: "Long path without huffman with shorter out buffer", - rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), - expectedPath: fmt.Sprintf("/%s", strings.Repeat("a", 19)), + rawPath: "/" + strings.Repeat("a", maxHTTP2Path+1), + expectedPath: "/" + strings.Repeat("a", 19), outBufSize: 20, }, } diff --git a/pkg/network/protocols/mongo/client.go b/pkg/network/protocols/mongo/client.go index 580acf9be325da..15e9cbeb7de0db 100644 --- a/pkg/network/protocols/mongo/client.go +++ b/pkg/network/protocols/mongo/client.go @@ -10,7 +10,6 @@ package mongo import ( "context" - "fmt" "net" "time" @@ -41,7 +40,7 @@ type Client struct { // NewClient creates a new mongo client func NewClient(opts Options) (*Client, error) { - clientOptions := options.Client().ApplyURI(fmt.Sprintf("mongodb://%s", opts.ServerAddress)) + clientOptions := options.Client().ApplyURI("mongodb://" + opts.ServerAddress) if opts.Username == "" { opts.Username = User } diff --git a/pkg/network/protocols/postgres/server.go b/pkg/network/protocols/postgres/server.go index dbb2e30752ca56..c6c7598c92287b 100644 --- a/pkg/network/protocols/postgres/server.go +++ b/pkg/network/protocols/postgres/server.go @@ -9,7 +9,6 @@ package postgres import ( - "fmt" "io" "os" "path/filepath" @@ -50,7 +49,7 @@ func RunServer(t testing.TB, serverAddr, serverPort string, enableTLS bool) erro "TESTDIR=" + testDataDir, } - scanner, err := globalutils.NewScanner(regexp.MustCompile(fmt.Sprintf(".*listening on IPv4 address \"0.0.0.0\", port %s", serverPort)), globalutils.NoPattern) + scanner, err := globalutils.NewScanner(regexp.MustCompile(".*listening on IPv4 address \"0.0.0.0\", port "+serverPort), globalutils.NoPattern) require.NoError(t, err, "failed to create pattern scanner") dockerCfg := dockerutils.NewComposeConfig( diff --git a/pkg/network/protocols/postgres/telemetry.go b/pkg/network/protocols/postgres/telemetry.go index 3c26d92ada9c2a..682a715e3e8dec 100644 --- 
a/pkg/network/protocols/postgres/telemetry.go +++ b/pkg/network/protocols/postgres/telemetry.go @@ -8,7 +8,6 @@ package postgres import ( - "fmt" "strconv" "github.com/DataDog/datadog-agent/pkg/network/config" @@ -118,7 +117,7 @@ type Telemetry struct { func createQueryLengthBuckets(metricGroup *libtelemetry.MetricGroup) [numberOfBuckets]*extractionFailureCounter { var buckets [numberOfBuckets]*extractionFailureCounter for i := 0; i < numberOfBuckets; i++ { - buckets[i] = newExtractionFailureCounter(metricGroup, "query_length_bucket"+fmt.Sprint(i+1), libtelemetry.OptStatsd) + buckets[i] = newExtractionFailureCounter(metricGroup, "query_length_bucket"+strconv.Itoa(i+1), libtelemetry.OptStatsd) } return buckets } diff --git a/pkg/network/protocols/redis/telemetry.go b/pkg/network/protocols/redis/telemetry.go index 3de343302a8931..5d30355d94635d 100644 --- a/pkg/network/protocols/redis/telemetry.go +++ b/pkg/network/protocols/redis/telemetry.go @@ -8,8 +8,6 @@ package redis import ( - "fmt" - libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -38,7 +36,7 @@ func newTelemetry() *telemetry { telem.commandDistribution = make(map[CommandType]*libtelemetry.Counter, maxCommand) for command := UnknownCommand; command < maxCommand; command++ { - telem.commandDistribution[command] = metricGroup.NewCounter("total_hits", fmt.Sprintf("command:%s", command.String()), libtelemetry.OptPrometheus) + telem.commandDistribution[command] = metricGroup.NewCounter("total_hits", "command:"+command.String(), libtelemetry.OptPrometheus) } return &telem diff --git a/pkg/network/state_test.go b/pkg/network/state_test.go index 7f4e647631e969..e8f8ec06e4fd80 100644 --- a/pkg/network/state_test.go +++ b/pkg/network/state_test.go @@ -12,6 +12,7 @@ import ( "math" "math/rand" "net/netip" + "strconv" "sync" "syscall" "testing" @@ -806,7 +807,7 @@ func TestRaceConditions(_ *testing.T) { state.GetDelta(c, 
latestEpochTime(), genConns(nConns), nil, nil) } } - }(fmt.Sprintf("%d", i)) + }(strconv.Itoa(i)) } wg.Wait() diff --git a/pkg/network/tracer/cached_conntrack.go b/pkg/network/tracer/cached_conntrack.go index f3a39ba340ee22..467ce03d1cf0f1 100644 --- a/pkg/network/tracer/cached_conntrack.go +++ b/pkg/network/tracer/cached_conntrack.go @@ -9,7 +9,6 @@ package tracer import ( "errors" - "fmt" "net/netip" "os" "sync" @@ -128,7 +127,7 @@ func (cache *cachedConntrack) ensureConntrack(ino uint64, pid int) (netlink.Conn defer cache.Unlock() if cache.closed { - return nil, fmt.Errorf("cache Close has already been called") + return nil, errors.New("cache Close has already been called") } v, ok := cache.cache.Get(ino) diff --git a/pkg/network/tracer/connection/ebpf_tracer.go b/pkg/network/tracer/connection/ebpf_tracer.go index 4c4f4149586b86..07d298269ae35c 100644 --- a/pkg/network/tracer/connection/ebpf_tracer.go +++ b/pkg/network/tracer/connection/ebpf_tracer.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "strconv" "sync" "time" "unique" @@ -755,7 +756,7 @@ func (t *ebpfTracer) Collect(ch chan<- prometheus.Metric) { // Collect the TCP failure telemetry for k, v := range t.getTCPFailureTelemetry() { - EbpfTracerTelemetry.tcpFailedConnections.Add(float64(v), fmt.Sprintf("%d", k)) + EbpfTracerTelemetry.tcpFailedConnections.Add(float64(v), strconv.Itoa(int(k))) } } diff --git a/pkg/network/tracer/connection/ebpfless_tracer.go b/pkg/network/tracer/connection/ebpfless_tracer.go index 855ee3c1e084c4..5ed7676ce90601 100644 --- a/pkg/network/tracer/connection/ebpfless_tracer.go +++ b/pkg/network/tracer/connection/ebpfless_tracer.go @@ -8,6 +8,7 @@ package connection import ( + "errors" "fmt" "io" "sync" @@ -172,7 +173,7 @@ func (t *ebpfLessTracer) processConnection( return nil } if !flags.ip4Present && !flags.ip6Present { - return fmt.Errorf("expected to have an IP layer") + return errors.New("expected to have an IP layer") } // don't trace families/protocols that are disabled 
by configuration @@ -233,7 +234,7 @@ func (t *ebpfLessTracer) processConnection( return err } if direction == network.UNKNOWN { - return fmt.Errorf("could not determine connection direction") + return errors.New("could not determine connection direction") } conn.Direction = direction @@ -422,7 +423,7 @@ func (t *ebpfLessTracer) GetMap(string) (*ebpf.Map, error) { return nil, nil } // DumpMaps (for debugging purpose) returns all maps content by default or selected maps from maps parameter. func (t *ebpfLessTracer) DumpMaps(_ io.Writer, _ ...string) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } // Type returns the type of the underlying ebpf ebpfLessTracer that is currently loaded @@ -431,11 +432,11 @@ func (t *ebpfLessTracer) Type() TracerType { } func (t *ebpfLessTracer) Pause() error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func (t *ebpfLessTracer) Resume() error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } // Describe returns all descriptions of the collector diff --git a/pkg/network/tracer/connection/fentry/probes.go b/pkg/network/tracer/connection/fentry/probes.go index 9851be10ee7686..28fa2889032453 100644 --- a/pkg/network/tracer/connection/fentry/probes.go +++ b/pkg/network/tracer/connection/fentry/probes.go @@ -8,6 +8,7 @@ package fentry import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/ebpf" @@ -215,7 +216,7 @@ func enableAdvancedUDP(enabled map[string]struct{}) error { } else if _, miss := missing["skb_free_datagram_locked"]; !miss { enableProgram(enabled, skbFreeDatagramLocked) } else { - return fmt.Errorf("missing desired UDP receive kernel functions") + return errors.New("missing desired UDP receive kernel functions") } return nil } diff --git a/pkg/network/tracer/connection/fentry/tracer.go b/pkg/network/tracer/connection/fentry/tracer.go index 55a952dfa3981b..41d1a32adc1c46 100644 --- 
a/pkg/network/tracer/connection/fentry/tracer.go +++ b/pkg/network/tracer/connection/fentry/tracer.go @@ -40,7 +40,7 @@ func LoadTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHa return nil, nil, fmt.Errorf("failed to check HasTasksRCUExitLockSymbol: %w", err) } if hasPotentialFentryDeadlock { - return nil, nil, fmt.Errorf("unable to load fentry because this kernel version has a potential deadlock (fixed in kernel v6.9+)") + return nil, nil, errors.New("unable to load fentry because this kernel version has a potential deadlock (fixed in kernel v6.9+)") } m := ddebpf.NewManagerWithDefault(&manager.Manager{}, "network", &ebpftelemetry.ErrorsTelemetryModifier{}, connCloseEventHandler) diff --git a/pkg/network/tracer/connection/ssl-uprobes/ebpf_ssl.go b/pkg/network/tracer/connection/ssl-uprobes/ebpf_ssl.go index 4808c333e34f6a..adede384e3e139 100644 --- a/pkg/network/tracer/connection/ssl-uprobes/ebpf_ssl.go +++ b/pkg/network/tracer/connection/ssl-uprobes/ebpf_ssl.go @@ -9,6 +9,7 @@ package ssluprobes import ( + "errors" "fmt" "regexp" "time" @@ -32,18 +33,18 @@ import ( // ValidateSupported returns an error if TLS cert collection can't be supported func ValidateSupported() error { if features.HaveBoundedLoops() != nil { - return fmt.Errorf("TLS cert collection requires bounded loops (linux 5.4+)") + return errors.New("TLS cert collection requires bounded loops (linux 5.4+)") } if features.HaveProgramType(ebpf.RawTracepoint) != nil { - return fmt.Errorf("TLS cert collection requires raw tracepoints (linux 4.17+)") + return errors.New("TLS cert collection requires raw tracepoints (linux 4.17+)") } // pass in EnableCORE: true so we're only checking kernel features. This is because // ConfigureOptions is called before we even know what tracer loaded successfully. 
// newEbpfTracer properly disables TLS cert collection on prebuilt if !sharedlibraries.IsSupported(&ddebpf.Config{EnableCORE: true}) { - return fmt.Errorf("TLS cert collection requires shared library monitoring (kernel 4.14 on x86, 5.5 on arm64)") + return errors.New("TLS cert collection requires shared library monitoring (kernel 4.14 on x86, 5.5 on arm64)") } hasUretprobeBug, err := kernelbugs.HasUretprobeSyscallSeccompBug() @@ -51,7 +52,7 @@ func ValidateSupported() error { return fmt.Errorf("disabling TLS cert collection due to failure to check for uretprobe syscall seccomp bug: %v", err) } if hasUretprobeBug { - return fmt.Errorf("disabling TLS cert collection due to kernel bug that causes segmentation faults with uretprobes and seccomp filters") + return errors.New("disabling TLS cert collection due to kernel bug that causes segmentation faults with uretprobes and seccomp filters") } return nil } diff --git a/pkg/network/tracer/ebpf_conntracker.go b/pkg/network/tracer/ebpf_conntracker.go index c3695b05436978..7e37a59a03159e 100644 --- a/pkg/network/tracer/ebpf_conntracker.go +++ b/pkg/network/tracer/ebpf_conntracker.go @@ -441,7 +441,7 @@ func getManager(cfg *config.Config, buf io.ReaderAt, opts manager.Options) (*man pid, err := kernel.RootNSPID() if err != nil { - return nil, fmt.Errorf("failed to get system-probe pid in root pid namespace") + return nil, errors.New("failed to get system-probe pid in root pid namespace") } opts.ConstantEditors = append(opts.ConstantEditors, manager.ConstantEditor{ diff --git a/pkg/network/tracer/networkfilter/network_filter.go b/pkg/network/tracer/networkfilter/network_filter.go index e3a2fb42cf96c6..43fd55868e2e50 100644 --- a/pkg/network/tracer/networkfilter/network_filter.go +++ b/pkg/network/tracer/networkfilter/network_filter.go @@ -7,6 +7,7 @@ package networkfilter import ( + "errors" "fmt" "net/netip" "strconv" @@ -148,7 +149,7 @@ func parsePortFilter(pf string) (uint64, uint64, ConnTypeFilter, error) { if err != nil 
{ return 0, 0, connTypeFilter, fmt.Errorf("failed to parse ports: %s", err) } else if lowerPort == 0 || upperPort == 0 { - return 0, 0, connTypeFilter, fmt.Errorf("invalid port 0") + return 0, 0, connTypeFilter, errors.New("invalid port 0") } else if lowerPort > upperPort { return 0, 0, connTypeFilter, fmt.Errorf("invalid port range %d-%d", lowerPort, upperPort) } diff --git a/pkg/network/tracer/offsetguess/conntrack.go b/pkg/network/tracer/offsetguess/conntrack.go index 4cc2c052248646..a139faf480b44e 100644 --- a/pkg/network/tracer/offsetguess/conntrack.go +++ b/pkg/network/tracer/offsetguess/conntrack.go @@ -331,7 +331,7 @@ func newConntrackEventGenerator(ns netns.NsHandle) (*conntrackEventGenerator, er // port 0 means we let the kernel choose a free port var err error - addr := fmt.Sprintf("%s:0", listenIPv4) + addr := listenIPv4 + ":0" err = netnsutil.WithNS(eg.ns, func() error { eg.udpAddr, eg.udpDone, err = newUDPServer(addr) return err diff --git a/pkg/network/tracer/offsetguess/tracer.go b/pkg/network/tracer/offsetguess/tracer.go index 0c66f4295f3b0f..dcf77263a49989 100644 --- a/pkg/network/tracer/offsetguess/tracer.go +++ b/pkg/network/tracer/offsetguess/tracer.go @@ -318,7 +318,7 @@ func GetIPv6LinkLocalAddress() ([]*net.UDPAddr, error) { if len(udpAddrs) > 0 { return udpAddrs, nil } - return nil, fmt.Errorf("no IPv6 link local address found") + return nil, errors.New("no IPv6 link local address found") } // checkAndUpdateCurrentOffset checks the value for the current offset stored @@ -906,7 +906,7 @@ func newTracerEventGenerator(flowi6 bool) (*tracerEventGenerator, error) { // port 0 means we let the kernel choose a free port var err error - addr := fmt.Sprintf("%s:0", listenIPv4) + addr := listenIPv4 + ":0" eg.listener, err = net.Listen("tcp4", addr) if err != nil { return nil, err @@ -978,7 +978,7 @@ func (e *tracerEventGenerator) Generate(status GuessWhat, expected *fieldValues) return err } - bindAddress := fmt.Sprintf("[%s]:9092", addr.String()) + 
bindAddress := "[" + addr.String() + "]:9092" // Since we connect to a random IP, this will most likely fail. In the unlikely case where it connects // successfully, we close the connection to avoid a leak. @@ -1091,7 +1091,7 @@ func acceptHandler(l net.Listener) { func TCPGetInfo(conn net.Conn) (*unix.TCPInfo, error) { tcpConn, ok := conn.(*net.TCPConn) if !ok { - return nil, fmt.Errorf("not a TCPConn") + return nil, errors.New("not a TCPConn") } sysc, err := tcpConn.SyscallConn() diff --git a/pkg/network/tracer/process_cache_test.go b/pkg/network/tracer/process_cache_test.go index 28ee902be1625e..7631933d672203 100644 --- a/pkg/network/tracer/process_cache_test.go +++ b/pkg/network/tracer/process_cache_test.go @@ -8,7 +8,7 @@ package tracer import ( - "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -277,7 +277,7 @@ func TestProcessCacheGet(t *testing.T) { } for i, te := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { p, ok := pc.Get(1234, te.ts) assert.Equal(t, te.ok, ok) if !te.ok { diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index 6b03b8decacd2f..4b32ff1d672a65 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -697,7 +697,7 @@ var allStats = []statsComp{ func (t *Tracer) getStats(comps ...statsComp) (map[string]interface{}, error) { if t.state == nil { - return nil, fmt.Errorf("internal state not yet initialized") + return nil, errors.New("internal state not yet initialized") } if len(comps) == 0 { @@ -735,7 +735,7 @@ func (t *Tracer) GetStats() (map[string]interface{}, error) { // DebugNetworkState returns a map with the current tracer's internal state, for debugging func (t *Tracer) DebugNetworkState(clientID string) (map[string]interface{}, error) { if t.state == nil { - return nil, fmt.Errorf("internal state not yet initialized") + return nil, errors.New("internal state not yet initialized") } return 
t.state.DumpState(clientID), nil } diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index ecf91b35fac910..ebed1781341dfe 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -558,7 +558,7 @@ func (s *TracerSuite) TestConntrackDelays() { _, port, err := net.SplitHostPort(server.Address()) require.NoError(t, err) - c, err := tracertestutil.DialTCP("tcp", fmt.Sprintf("2.2.2.2:%s", port)) + c, err := tracertestutil.DialTCP("tcp", "2.2.2.2:"+port) require.NoError(t, err) defer tracertestutil.GracefulCloseTCP(c) _, err = c.Write([]byte("ping")) @@ -600,7 +600,7 @@ func (s *TracerSuite) TestTranslationBindingRegression() { // Send data to 2.2.2.2 (which should be translated to 1.1.1.1) _, port, err := net.SplitHostPort(server.Address()) require.NoError(t, err) - c, err := tracertestutil.DialTCP("tcp", fmt.Sprintf("2.2.2.2:%s", port)) + c, err := tracertestutil.DialTCP("tcp", "2.2.2.2:"+port) require.NoError(t, err) defer tracertestutil.GracefulCloseTCP(c) _, err = c.Write([]byte("ping")) @@ -919,19 +919,19 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { "ip addr add 2.2.2.1/24 broadcast 2.2.2.255 dev br0", "ip link add veth1 type veth peer name veth2", "ip link set veth1 master br0", - fmt.Sprintf("ip link set veth2 netns %s", ns1), - fmt.Sprintf("ip -n %s addr add 2.2.2.2/24 broadcast 2.2.2.255 dev veth2", ns1), + "ip link set veth2 netns " + ns1, + "ip -n " + ns1 + " addr add 2.2.2.2/24 broadcast 2.2.2.255 dev veth2", "ip link add veth3 type veth peer name veth4", "ip link set veth3 master br0", - fmt.Sprintf("ip link set veth4 netns %s", ns2), - fmt.Sprintf("ip -n %s addr add 2.2.2.3/24 broadcast 2.2.2.255 dev veth4", ns2), + "ip link set veth4 netns " + ns2, + "ip -n " + ns2 + " addr add 2.2.2.3/24 broadcast 2.2.2.255 dev veth4", "ip link set br0 up", "ip link set veth1 up", - fmt.Sprintf("ip -n %s link set veth2 up", ns1), + "ip -n " + ns1 + " link set veth2 
up", "ip link set veth3 up", - fmt.Sprintf("ip -n %s link set veth4 up", ns2), - fmt.Sprintf("ip -n %s r add default via 2.2.2.1", ns1), - fmt.Sprintf("ip -n %s r add default via 2.2.2.1", ns2), + "ip -n " + ns2 + " link set veth4 up", + "ip -n " + ns1 + " r add default via 2.2.2.1", + "ip -n " + ns2 + " r add default via 2.2.2.1", "iptables -I POSTROUTING 1 -t nat -s 2.2.2.0/24 ! -d 2.2.2.0/24 -j MASQUERADE", "iptables -I FORWARD -i br0 -j ACCEPT", "iptables -I FORWARD -o br0 -j ACCEPT", @@ -944,7 +944,7 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { network.SubnetForHwAddrFunc = func(hwAddr net.HardwareAddr) (network.Subnet, error) { for _, i := range ifs { if hwAddr.String() == i.HardwareAddr.String() { - return network.Subnet{Alias: fmt.Sprintf("subnet-%s", i.Name)}, nil + return network.Subnet{Alias: "subnet-" + i.Name}, nil } } @@ -1052,7 +1052,7 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { }, 3*time.Second, 100*time.Millisecond) require.NotNil(t, conn.Via) - require.Equal(t, fmt.Sprintf("subnet-%s", ifi.Name), conn.Via.Subnet.Alias) + require.Equal(t, "subnet-"+ifi.Name, conn.Via.Subnet.Alias) }) } @@ -1676,7 +1676,7 @@ func iptablesWrapper(t *testing.T, f func()) { // Init iptables rule to simulate packet loss rule := "INPUT --source 127.0.0.1 -j DROP" - create := strings.Fields(fmt.Sprintf("-I %s", rule)) + create := strings.Fields("-I " + rule) state := testutil.IptablesSave(t) defer testutil.IptablesRestore(t, state) @@ -3220,7 +3220,7 @@ func testTLSCertParsing(t *testing.T, client *http.Client, matcher func(c *netwo usmutils.WaitForProgramsToBeTraced(t, ssluprobes.CNMModuleName, ssluprobes.CNMTLSAttacherName, cmd.Process.Pid, usmutils.ManualTracingFallbackEnabled) - code, _, err := tracertestutil.HTTPGet(client, fmt.Sprintf("https://%s/status/200/foobar", serverAddr)) + code, _, err := tracertestutil.HTTPGet(client, "https://"+serverAddr+"/status/200/foobar") require.NoError(t, err) require.Equal(t, 200, code) diff --git 
a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index b966dc4ad51e24..e1a00328ac3743 100644 --- a/pkg/network/tracer/tracer_test.go +++ b/pkg/network/tracer/tracer_test.go @@ -930,7 +930,7 @@ type UDPServer struct { func (s *UDPServer) Run(payloadSize int) error { if s.network == "" { - return fmt.Errorf("must set network for UDPServer.Run()") + return errors.New("must set network for UDPServer.Run()") } var err error var ln net.PacketConn @@ -990,7 +990,7 @@ func (s *UDPServer) Shutdown() { func dialUDP(network, address string) (net.Conn, error) { if network == "" { - return nil, fmt.Errorf("must set network to dialUDP") + return nil, errors.New("must set network to dialUDP") } conn, err := net.DialTimeout(network, address, 50*time.Millisecond) if err != nil { diff --git a/pkg/network/tracer/utils_linux.go b/pkg/network/tracer/utils_linux.go index afb6ca29b64160..f1c671ca19dc9b 100644 --- a/pkg/network/tracer/utils_linux.go +++ b/pkg/network/tracer/utils_linux.go @@ -88,6 +88,6 @@ func verifyOSVersion(kernelCode kernel.Version, platform string, exclusionList [ return true, nil } errMsg := fmt.Sprintf("Kernel unsupported (%s) - ", kernelCode) - errMsg += fmt.Sprintf("required functions missing: %s", strings.Join(missingFuncs, ", ")) + errMsg += "required functions missing: " + strings.Join(missingFuncs, ", ") return false, errors.New(errMsg) } diff --git a/pkg/network/usm/ebpf_main.go b/pkg/network/usm/ebpf_main.go index 3bc63fa43c91e3..ce319ff7b89744 100644 --- a/pkg/network/usm/ebpf_main.go +++ b/pkg/network/usm/ebpf_main.go @@ -531,10 +531,10 @@ func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) er func getAssetName(module string, debug bool) string { if debug { - return fmt.Sprintf("%s-debug.o", module) + return module + "-debug.o" } - return fmt.Sprintf("%s.o", module) + return module + ".o" } func (e *ebpfProgram) dumpMapsHandler(w io.Writer, _ *manager.Manager, mapName string, currentMap *ebpf.Map) { diff 
--git a/pkg/network/usm/ebpf_ssl_test.go b/pkg/network/usm/ebpf_ssl_test.go index fe1f5fdc7fdd8d..8d5c005fac73e7 100644 --- a/pkg/network/usm/ebpf_ssl_test.go +++ b/pkg/network/usm/ebpf_ssl_test.go @@ -10,7 +10,6 @@ package usm import ( "context" "errors" - "fmt" nethttp "net/http" "os/exec" "path/filepath" @@ -57,7 +56,7 @@ func testArch(t *testing.T, arch string) { // Named site-packages/ddtrace since it is used from servicediscovery tests too. libmmap := filepath.Join(curDir, "testdata", "site-packages", "ddtrace") - lib := filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", arch)) + lib := filepath.Join(libmmap, "libssl.so."+arch) monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) require.NotNil(t, monitor) diff --git a/pkg/network/usm/maps/pid_validator.go b/pkg/network/usm/maps/pid_validator.go index ac741185346c6d..89bd7b90844f56 100644 --- a/pkg/network/usm/maps/pid_validator.go +++ b/pkg/network/usm/maps/pid_validator.go @@ -10,6 +10,7 @@ package maps import ( "fmt" "os" + "strconv" "unsafe" "github.com/cilium/ebpf" @@ -26,7 +27,7 @@ func extractPID(pidTGID uint64) uint32 { // It uses kernel.HostProc() to correctly handle containerized environments // where the agent runs in a container but needs to check PIDs from the host namespace. 
func pidExists(pid uint32) bool { - _, err := os.Stat(kernel.HostProc(fmt.Sprintf("%d", pid))) + _, err := os.Stat(kernel.HostProc(strconv.FormatUint(uint64(pid), 10))) return err == nil } diff --git a/pkg/network/usm/maps/types.go b/pkg/network/usm/maps/types.go index 694f591398e886..5d828ed2330802 100644 --- a/pkg/network/usm/maps/types.go +++ b/pkg/network/usm/maps/types.go @@ -8,7 +8,10 @@ // Package maps provides eBPF map leak detection for USM package maps -import "fmt" +import ( + "fmt" + "strings" +) // MapLeakInfo contains leak detection results for a single eBPF map type MapLeakInfo struct { @@ -55,18 +58,20 @@ func (m *MapLeakInfo) HasLeaks() bool { // String returns a human-readable summary of the report func (r *LeakDetectionReport) String() string { - result := "USM eBPF Map Leak Detection (PID-Keyed Maps)\n" - result += "=============================================\n\n" + var builder strings.Builder + builder.WriteString("USM eBPF Map Leak Detection (PID-Keyed Maps)\n") + builder.WriteString("=============================================\n\n") for _, mapInfo := range r.Maps { - result += mapInfo.String() + "\n" + builder.WriteString(mapInfo.String()) + builder.WriteString("\n") if mapInfo.HasLeaks() && len(mapInfo.DeadPIDs) > 0 { - result += fmt.Sprintf(" - Dead PIDs: %v\n", mapInfo.DeadPIDs) + fmt.Fprintf(&builder, " - Dead PIDs: %v\n", mapInfo.DeadPIDs) } } - result += fmt.Sprintf("\nSummary: %d leaked entries found across %d maps\n", + fmt.Fprintf(&builder, "\nSummary: %d leaked entries found across %d maps\n", r.TotalLeakedEntries, r.TotalMapsChecked) - return result + return builder.String() } diff --git a/pkg/network/usm/monitor.go b/pkg/network/usm/monitor.go index 2e039aff447312..9527167d982189 100644 --- a/pkg/network/usm/monitor.go +++ b/pkg/network/usm/monitor.go @@ -86,7 +86,7 @@ func NewMonitor(c *config.Config, connectionProtocolMap *ebpf.Map, statsd statsd filter, _ := mgr.GetProbe(manager.ProbeIdentificationPair{EBPFFuncName: 
protocolDispatcherSocketFilterFunction, UID: probeUID}) if filter == nil { - return nil, fmt.Errorf("error retrieving socket filter") + return nil, errors.New("error retrieving socket filter") } ddebpf.AddNameMappings(mgr.Manager.Manager, "usm_monitor") @@ -124,7 +124,7 @@ func (m *Monitor) Start() error { defer func() { if err != nil { if errors.Is(err, syscall.ENOMEM) { - err = fmt.Errorf("could not enable usm monitoring: not enough memory to attach http ebpf socket filter. please consider raising the limit via sysctl -w net.core.optmem_max=") + err = errors.New("could not enable usm monitoring: not enough memory to attach http ebpf socket filter. please consider raising the limit via sysctl -w net.core.optmem_max=") } else { err = fmt.Errorf("could not enable USM: %s", err) } diff --git a/pkg/network/usm/monitor_test.go b/pkg/network/usm/monitor_test.go index 0b5bce6930c124..e037d8d575d6e0 100644 --- a/pkg/network/usm/monitor_test.go +++ b/pkg/network/usm/monitor_test.go @@ -72,7 +72,7 @@ var ( func TestMonitorProtocolFail(t *testing.T) { failingStartupMock := func() error { - return fmt.Errorf("mock error") + return errors.New("mock error") } testCases := []struct { @@ -127,7 +127,7 @@ func (s *HTTPTestSuite) TestHTTPStats() { monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) - resp, err := nethttp.Get(fmt.Sprintf("http://%s/%d/test", serverAddr, nethttp.StatusNoContent)) + resp, err := nethttp.Get("http://" + serverAddr + "/" + strconv.Itoa(nethttp.StatusNoContent) + "/test") require.NoError(t, err) _ = resp.Body.Close() srvDoneFn() @@ -165,7 +165,7 @@ func (s *HTTPTestSuite) TestHTTPMonitorLoadWithIncompleteBuffers() { }) fastSrvDoneFn := testutil.HTTPServer(t, fastServerAddr, testutil.Options{}) - abortedRequestFn := requestGenerator(t, fmt.Sprintf("%s/ignore", slowServerAddr), emptyBody) + abortedRequestFn := requestGenerator(t, slowServerAddr+"/ignore", emptyBody) wg := sync.WaitGroup{} abortedRequests := make(chan *nethttp.Request, 100) 
for i := 0; i < 100; i++ { @@ -670,7 +670,7 @@ func cleanProtocolMaps(t *testing.T, protocolName string, manager *manager.Manag func cleanMaps(t *testing.T, protocolName string, maps map[string]*ebpf.Map) { for name, m := range maps { - if !strings.Contains(name, protocolName) || strings.Contains(name, fmt.Sprintf("%s_batch", protocolName)) { + if !strings.Contains(name, protocolName) || strings.Contains(name, protocolName+"_batch") { continue } cleanMapEntries(t, m) @@ -750,7 +750,7 @@ func isPercpu(mapType ebpf.MapType) bool { } func generateMockMap(t *testing.T, mapType ebpf.MapType) (string, *ebpf.Map) { - name := fmt.Sprintf("test_%s", mapType.String()) + name := "test_" + mapType.String() m, err := ebpf.NewMap(&ebpf.MapSpec{ Name: name, Type: mapType, diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index 65dfd072f3026c..6a7f651d4e7280 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -129,7 +129,7 @@ func (s *tlsSuite) TestHTTPSViaLibraryIntegration() { rawout, err := exec.Command("docker", "inspect", "-f", "{{.State.Pid}}", "musl-alpine-1").Output() require.NoError(t, err) containerPid := strings.TrimSpace(string(rawout)) - containerRoot := fmt.Sprintf("/proc/%s/root", containerPid) + containerRoot := "/proc/" + containerPid + "/root" // We start curl with chroot instead of via docker run since // docker run forks and so `testHTTPSLibrary` wouldn't have the @@ -1024,7 +1024,7 @@ func testNodeJSSegfaultPrevention(t *testing.T, usmMonitor *Monitor, nodeJSPID u initialPID := nodeJSPID // Create client and make HTTPS requests to trigger potential uretprobe usage - client, requestFn := simpleGetRequestsGenerator(t, fmt.Sprintf("localhost:%s", serverPort)) + client, requestFn := simpleGetRequestsGenerator(t, "localhost:"+serverPort) // Make several requests that would normally trigger uretprobe attachment for i := 0; i < 5; i++ { @@ -1067,7 +1067,7 @@ func 
testNodeJSNormalMonitoring(t *testing.T, usmMonitor *Monitor, nodeJSPID uin utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, nodeJsAttacherName, int(nodeJSPID), utils.ManualTracingFallbackEnabled) // This maps will keep track of whether the tracer saw this request already or not - client, requestFn := simpleGetRequestsGenerator(t, fmt.Sprintf("localhost:%s", serverPort)) + client, requestFn := simpleGetRequestsGenerator(t, "localhost:"+serverPort) var requests []*nethttp.Request for i := 0; i < expectedOccurrences; i++ { @@ -1166,7 +1166,7 @@ func testOpenSSLNormalMonitoring(t *testing.T, usmMonitor *Monitor, pythonPID ui utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, UsmTLSAttacherName, int(pythonPID), utils.ManualTracingFallbackEnabled) // This maps will keep track of whether the tracer saw this request already or not - client, requestFn := simpleGetRequestsGenerator(t, fmt.Sprintf("localhost:%s", serverPort)) + client, requestFn := simpleGetRequestsGenerator(t, "localhost:"+serverPort) var requests []*nethttp.Request for i := 0; i < expectedOccurrences; i++ { diff --git a/pkg/network/usm/postgres_monitor_test.go b/pkg/network/usm/postgres_monitor_test.go index 2e0da863e3e6fb..ff0490df3ea9b5 100644 --- a/pkg/network/usm/postgres_monitor_test.go +++ b/pkg/network/usm/postgres_monitor_test.go @@ -52,12 +52,12 @@ const ( ) var ( - longCreateQuery = fmt.Sprintf("CREATE TABLE %s (id SERIAL PRIMARY KEY, foo TEXT)", strings.Repeat("table_", repeatCount)) - longDropeQuery = fmt.Sprintf("DROP TABLE IF EXISTS %s", strings.Repeat("table_", repeatCount)) + longCreateQuery = "CREATE TABLE " + strings.Repeat("table_", repeatCount) + " (id SERIAL PRIMARY KEY, foo TEXT)" + longDropeQuery = "DROP TABLE IF EXISTS " + strings.Repeat("table_", repeatCount) ) func createInsertQuery(values ...string) string { - return fmt.Sprintf("INSERT INTO dummy (foo) VALUES ('%s')", strings.Join(values, "'), ('")) + return "INSERT INTO dummy (foo) VALUES ('" + 
strings.Join(values, "'), ('") + "')" } func generateTestValues(startingIndex, count int) []string { diff --git a/pkg/network/usm/procnet/procnet.go b/pkg/network/usm/procnet/procnet.go index 3a5015ec29802a..5eab2d455d9221 100644 --- a/pkg/network/usm/procnet/procnet.go +++ b/pkg/network/usm/procnet/procnet.go @@ -15,7 +15,6 @@ package procnet import ( - "fmt" "net/netip" "os" "path/filepath" @@ -60,8 +59,8 @@ func GetTCPConnections() []TCPConnection { } if _, ok := seenNS[ino]; !ok { - populateIndex(connByInode, ino, filepath.Join(procRoot, fmt.Sprintf("%d", pid), "net", "tcp")) - populateIndex(connByInode, ino, filepath.Join(procRoot, fmt.Sprintf("%d", pid), "net", "tcp6")) + populateIndex(connByInode, ino, filepath.Join(procRoot, strconv.Itoa(pid), "net", "tcp")) + populateIndex(connByInode, ino, filepath.Join(procRoot, strconv.Itoa(pid), "net", "tcp6")) } seenNS[ino] = struct{}{} @@ -108,7 +107,7 @@ func populateIndex(connByInode map[int]TCPConnection, ino uint32, file string) { // original `connsByInode` map size because one TCP socket can potentially "map" // to multiple (PID, FD) pairs (eg. forked processes etc). 
func matchFDWithSocket(procRoot string, pid int, connByInode map[int]TCPConnection, conns []TCPConnection) []TCPConnection { - fdsPath := filepath.Join(procRoot, fmt.Sprintf("%d", pid), "fd") + fdsPath := filepath.Join(procRoot, strconv.Itoa(pid), "fd") fdsDir, err := os.Open(fdsPath) if err != nil { return conns diff --git a/pkg/network/usm/sharedlibraries/ebpf.go b/pkg/network/usm/sharedlibraries/ebpf.go index 5ed00e21e8d192..770ca08ac4e425 100644 --- a/pkg/network/usm/sharedlibraries/ebpf.go +++ b/pkg/network/usm/sharedlibraries/ebpf.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "runtime" + "strconv" "strings" "sync" "unsafe" @@ -184,7 +185,7 @@ func (e *EbpfProgram) setupManagerAndPerfHandlers() error { continue } - mapName := fmt.Sprintf("%s_%s", string(libset), sharedLibrariesPerfMap) + mapName := string(libset) + "_" + sharedLibrariesPerfMap mode := perf.UpgradePerfBuffers(perfBufferSize, dataChannelSize, perf.Watermark(1), ringBufferSize) perfHandler, err := perf.NewEventHandler(mapName, handler.handleEvent, mode, @@ -455,12 +456,12 @@ func (e *EbpfProgram) init(buf bytecode.AssetReader, options manager.Options) er } constEd := manager.ConstantEditor{ - Name: fmt.Sprintf("%s_libset_enabled", string(libset)), + Name: string(libset) + "_libset_enabled", Value: value, } options.ConstantEditors = append(options.ConstantEditors, constEd) - enabledMsgs = append(enabledMsgs, fmt.Sprintf("%s=%d", libset, value)) + enabledMsgs = append(enabledMsgs, string(libset)+"="+strconv.FormatUint(value, 10)) } log.Infof("loading shared libraries program with libsets enabled: %s", strings.Join(enabledMsgs, ", ")) @@ -549,7 +550,7 @@ func (e *EbpfProgram) initializeProbes() { // Tracing represents fentry/fexit probes. 
tracingProbes := []manager.ProbeIdentificationPair{ { - EBPFFuncName: fmt.Sprintf("do_sys_%s_exit", openat2SysCall), + EBPFFuncName: "do_sys_" + openat2SysCall + "_exit", UID: probeUID, }, } @@ -568,7 +569,7 @@ func (e *EbpfProgram) initializeProbes() { for _, probe := range openatProbes { for _, traceType := range traceTypes { tpProbes = append(tpProbes, manager.ProbeIdentificationPair{ - EBPFFuncName: fmt.Sprintf("tracepoint__syscalls__sys_%s_%s", traceType, probe), + EBPFFuncName: "tracepoint__syscalls__sys_" + traceType + "_" + probe, UID: probeUID, }) } @@ -619,10 +620,10 @@ func (e *EbpfProgram) initializeProbes() { func getAssetName(module string, debug bool) string { if debug { - return fmt.Sprintf("%s-debug.o", module) + return module + "-debug.o" } - return fmt.Sprintf("%s.o", module) + return module + ".o" } // ToBytes converts the libpath to a byte array containing the path diff --git a/pkg/network/usm/sharedlibraries/testutil/testutil.go b/pkg/network/usm/sharedlibraries/testutil/testutil.go index 81b12a98640405..417efcbaaf39a0 100644 --- a/pkg/network/usm/sharedlibraries/testutil/testutil.go +++ b/pkg/network/usm/sharedlibraries/testutil/testutil.go @@ -9,7 +9,7 @@ package testutil import ( - "fmt" + "errors" "os/exec" "regexp" "sync" @@ -61,7 +61,7 @@ func OpenFromProcess(t *testing.T, programExecutable string, paths ...string) (* case <-time.After(time.Second * 5): patternScanner.PrintLogs(t) // please don't use t.Fatalf() here as we could test if it failed later - return nil, fmt.Errorf("couldn't launch process in time") + return nil, errors.New("couldn't launch process in time") } } } diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go index 9b1801abeb8edb..62a562c6c85074 100644 --- a/pkg/network/usm/usm_http2_monitor_test.go +++ b/pkg/network/usm/usm_http2_monitor_test.go @@ -2115,7 +2115,7 @@ func getExpectedOutcomeForPathWithRepeatedChars() map[usmhttp.Key]captureRange { for i := 1; i < 100; i++ { 
expected[usmhttp.Key{ Path: usmhttp.Path{ - Content: usmhttp.Interner.GetString(fmt.Sprintf("/%s", strings.Repeat("a", i))), + Content: usmhttp.Interner.GetString("/" + strings.Repeat("a", i)), }, Method: usmhttp.MethodPost, }] = captureRange{ diff --git a/pkg/network/usm/utils/file_registry.go b/pkg/network/usm/utils/file_registry.go index c729fd327a68a4..6b6652e58e2bcf 100644 --- a/pkg/network/usm/utils/file_registry.go +++ b/pkg/network/usm/utils/file_registry.go @@ -435,7 +435,7 @@ type registryTelemetry struct { func newRegistryTelemetry(programName string) registryTelemetry { metricGroup := telemetry.NewMetricGroup( "usm.file_registry", - fmt.Sprintf("program:%s", programName), + "program:"+programName, telemetry.OptPrometheus, ) diff --git a/pkg/network/usm/utils/file_registry_test.go b/pkg/network/usm/utils/file_registry_test.go index 1206420f5ccd53..f2a48da89c83e0 100644 --- a/pkg/network/usm/utils/file_registry_test.go +++ b/pkg/network/usm/utils/file_registry_test.go @@ -215,7 +215,7 @@ const ( func TestFailedRegistration(t *testing.T) { // Create a callback recorder that returns an error on purpose registerRecorder := new(CallbackRecorder) - registerRecorder.ReturnError = fmt.Errorf("failed registration") + registerRecorder.ReturnError = errors.New("failed registration") registerCallback := registerRecorder.Callback() unregisterRecorder := new(CallbackRecorder) @@ -256,7 +256,7 @@ func TestFailedRegistration(t *testing.T) { func TestShortLivedProcess(t *testing.T) { // Create a callback recorder that returns an error on purpose registerRecorder := new(CallbackRecorder) - registerRecorder.ReturnError = fmt.Errorf("failed registration") + registerRecorder.ReturnError = errors.New("failed registration") recorderCallback := registerRecorder.Callback() unregisterRecorder := new(CallbackRecorder) diff --git a/pkg/networkconfigmanagement/config/config.go b/pkg/networkconfigmanagement/config/config.go index 099700cb622e55..0e1720884627a1 100644 --- 
a/pkg/networkconfigmanagement/config/config.go +++ b/pkg/networkconfigmanagement/config/config.go @@ -10,6 +10,7 @@ package config import ( "encoding/json" + "errors" "fmt" "net" "net/url" @@ -248,7 +249,7 @@ func (ic *InitConfig) Validate() error { ic.Namespace = namespace if ic.MinCollectionInterval <= 0 { - return fmt.Errorf("min_collection_interval must be greater than zero") + return errors.New("min_collection_interval must be greater than zero") } // if SSH configs exist, ensure they're valid @@ -307,7 +308,7 @@ func (di *DeviceInstance) applyDefaults() { func (di *DeviceInstance) hasRequiredFields() error { // check for missing fields that are required for a device instance if di.IPAddress == "" { - return fmt.Errorf("ip_address is required") + return errors.New("ip_address is required") } authBaseString := "auth is required: missing %s for device %s" if di.Auth.Username == "" { @@ -336,7 +337,7 @@ func (sc *SSHConfig) validate() error { func (sc *SSHConfig) hasRequiredFields() error { // must have at least a known paths specified or skip verification (insecure, only for development/testing purposes) if sc.KnownHostsPath == "" && !sc.InsecureSkipVerify { - return fmt.Errorf("no SSH host key verification configured: set known_hosts_path or enable insecure_skip_verify") + return errors.New("no SSH host key verification configured: set known_hosts_path or enable insecure_skip_verify") } return nil } diff --git a/pkg/networkconfigmanagement/profile/profile_processing_test.go b/pkg/networkconfigmanagement/profile/profile_processing_test.go index dd269dabb60170..5ba3f33cad2428 100644 --- a/pkg/networkconfigmanagement/profile/profile_processing_test.go +++ b/pkg/networkconfigmanagement/profile/profile_processing_test.go @@ -10,7 +10,7 @@ package profile import ( "bufio" "bytes" - "fmt" + "errors" "strings" "testing" @@ -94,7 +94,7 @@ func Test_validateOutput(t *testing.T) { profile: newTestProfile(), commandType: Startup, configBytes: []byte(exampleConfig), - 
expected: fmt.Errorf("no metadata found for command type startup in profile test"), + expected: errors.New("no metadata found for command type startup in profile test"), }, } for _, tt := range tests { diff --git a/pkg/networkconfigmanagement/profile/profile_test_utils.go b/pkg/networkconfigmanagement/profile/profile_test_utils.go index 1698c13125c3bf..bdfeb537c31aa9 100644 --- a/pkg/networkconfigmanagement/profile/profile_test_utils.go +++ b/pkg/networkconfigmanagement/profile/profile_test_utils.go @@ -209,7 +209,7 @@ func getRunningScrubber() *scrubber.Scrubber { sc := scrubber.New() sc.AddReplacer(scrubber.SingleLine, scrubber.Replacer{ Regex: regexp.MustCompile(`(username .+ (password|secret) \d) .+`), - Repl: []byte(fmt.Sprintf(`$1 %s`, "")), + Repl: []byte("$1 " + ""), }) return sc } diff --git a/pkg/networkconfigmanagement/remote/ssh.go b/pkg/networkconfigmanagement/remote/ssh.go index ce0d88df0acd40..2b901af70c40f9 100644 --- a/pkg/networkconfigmanagement/remote/ssh.go +++ b/pkg/networkconfigmanagement/remote/ssh.go @@ -8,6 +8,7 @@ package remote import ( + "errors" "fmt" "io" "os" @@ -74,7 +75,7 @@ func buildHostKeyCallback(config *ncmconfig.SSHConfig) (ssh.HostKeyCallback, err log.Warnf("SSH host key verification is disabled - connects are insecure!") return ssh.InsecureIgnoreHostKey(), nil } - return nil, fmt.Errorf("No SSH host key configured: set known_hosts file path or enable insecure_skip_verify") + return nil, errors.New("No SSH host key configured: set known_hosts file path or enable insecure_skip_verify") } func buildAuthMethods(auth ncmconfig.AuthCredentials) ([]ssh.AuthMethod, error) { @@ -103,7 +104,7 @@ func buildAuthMethods(auth ncmconfig.AuthCredentials) ([]ssh.AuthMethod, error) } if len(methods) == 0 { - return nil, fmt.Errorf("no SSH authentication methods configured") + return nil, errors.New("no SSH authentication methods configured") } return methods, nil @@ -196,7 +197,7 @@ func isTransientSSH(err error) bool { // CombinedOutput runs 
a command using the SSH session and returns its output func (s *SSHSession) CombinedOutput(cmd string) ([]byte, error) { if s.session == nil { - return nil, fmt.Errorf("SSH session is nil") + return nil, errors.New("SSH session is nil") } return s.session.CombinedOutput(cmd) } diff --git a/pkg/networkconfigmanagement/remote/ssh_test.go b/pkg/networkconfigmanagement/remote/ssh_test.go index 7fb8961aa86c8b..9e235bf97a4a67 100644 --- a/pkg/networkconfigmanagement/remote/ssh_test.go +++ b/pkg/networkconfigmanagement/remote/ssh_test.go @@ -14,6 +14,7 @@ import ( "crypto/rsa" "encoding/base64" "encoding/pem" + "errors" "fmt" "os" "path/filepath" @@ -78,7 +79,7 @@ end` func TestSSHClient_RetrieveConfig_SessionCreationFailure(t *testing.T) { client := &MockSSHClient{ - sessionError: fmt.Errorf("failed to create SSH session"), + sessionError: errors.New("failed to create SSH session"), } _, err := client.RetrieveRunningConfig() @@ -89,7 +90,7 @@ func TestSSHClient_RetrieveConfig_SessionCreationFailure(t *testing.T) { func TestSSHClient_RetrieveConfig_CommandExecutionFailure(t *testing.T) { session := &mockSSHSession{ - err: fmt.Errorf("command execution failed"), + err: errors.New("command execution failed"), } client := &MockSSHClient{ @@ -181,7 +182,7 @@ func TestBuildHostKeyCallback(t *testing.T) { }, } for _, tt := range tests { - t.Run(fmt.Sprint(tt.name), func(t *testing.T) { + t.Run(tt.name, func(t *testing.T) { // initialize capturing logging var b bytes.Buffer w := bufio.NewWriter(&b) diff --git a/pkg/networkdevice/profile/profiledefinition/validation.go b/pkg/networkdevice/profile/profiledefinition/validation.go index cf450eee4924e8..b2914d548f2b65 100644 --- a/pkg/networkdevice/profile/profiledefinition/validation.go +++ b/pkg/networkdevice/profile/profiledefinition/validation.go @@ -139,7 +139,7 @@ func ValidateEnrichMetadata(metadata MetadataConfig) []string { for resName := range metadata { _, isValidRes := validMetadataResources[resName] if !isValidRes { - 
errors = append(errors, fmt.Sprintf("invalid resource: %s", resName)) + errors = append(errors, "invalid resource: "+resName) } else { res := metadata[resName] for fieldName := range res.Fields { diff --git a/pkg/networkpath/payload/utils.go b/pkg/networkpath/payload/utils.go index d709fe33d89b11..1210e57ab8c87a 100644 --- a/pkg/networkpath/payload/utils.go +++ b/pkg/networkpath/payload/utils.go @@ -6,6 +6,7 @@ package payload import ( + "errors" "fmt" "github.com/google/uuid" @@ -20,7 +21,7 @@ func NewPathtraceID() string { // Returns an error if any run does not have a valid destination IP address. func ValidateNetworkPath(path *NetworkPath) error { if path == nil { - return fmt.Errorf("invalid nil path") + return errors.New("invalid nil path") } if len(path.Traceroute.Runs) == 0 { diff --git a/pkg/obfuscate/credit_cards_test.go b/pkg/obfuscate/credit_cards_test.go index 7024b3b737d6c1..f4e3b01a6648e7 100644 --- a/pkg/obfuscate/credit_cards_test.go +++ b/pkg/obfuscate/credit_cards_test.go @@ -6,7 +6,7 @@ package obfuscate import ( - "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -112,7 +112,7 @@ func TestIINValidCardPrefix(t *testing.T) { {594388, false, false}, {219899, false, false}, } { - t.Run(fmt.Sprintf("%d", tt.in), func(t *testing.T) { + t.Run(strconv.Itoa(tt.in), func(t *testing.T) { maybe, yes := validCardPrefix(tt.in) assert.Equal(t, maybe, tt.maybe) assert.Equal(t, yes, tt.yes) diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go index b5ba0d8746d869..19127e06e1a5e5 100644 --- a/pkg/obfuscate/sql_test.go +++ b/pkg/obfuscate/sql_test.go @@ -1447,7 +1447,7 @@ in the middle'`, } for _, c := range cases { - t.Run(fmt.Sprintf("tokenize_%s", c.str), func(t *testing.T) { + t.Run("tokenize_"+c.str, func(t *testing.T) { tokenizer := NewSQLTokenizer(c.str, false, nil) kind, buffer := tokenizer.Scan() assert.Equal(t, c.expectedKind, kind) @@ -1576,7 +1576,7 @@ in the middle'`, } for _, c := range cases { - 
t.Run(fmt.Sprintf("tokenize_%s", c.str), func(t *testing.T) { + t.Run("tokenize_"+c.str, func(t *testing.T) { tokenizer := NewSQLTokenizer(c.str, true, nil) tokenizer.literalEscapes = true kind, buffer := tokenizer.Scan() diff --git a/pkg/opentelemetry-mapping-go/inframetadata/reporter_test.go b/pkg/opentelemetry-mapping-go/inframetadata/reporter_test.go index 1d9cb135331a0f..a672287135d199 100644 --- a/pkg/opentelemetry-mapping-go/inframetadata/reporter_test.go +++ b/pkg/opentelemetry-mapping-go/inframetadata/reporter_test.go @@ -7,7 +7,7 @@ package inframetadata import ( "context" - "fmt" + "errors" "testing" "time" @@ -81,7 +81,7 @@ type pusher struct { func (p *pusher) Push(_ context.Context, md payload.HostMetadata) error { p.md = md close(p.ch) - return fmt.Errorf("network error") + return errors.New("network error") } func TestRun(t *testing.T) { diff --git a/pkg/opentelemetry-mapping-go/otlp/attributes/gcp/gcp.go b/pkg/opentelemetry-mapping-go/otlp/attributes/gcp/gcp.go index 492fdaff4ca17c..a61e75218932be 100644 --- a/pkg/opentelemetry-mapping-go/otlp/attributes/gcp/gcp.go +++ b/pkg/opentelemetry-mapping-go/otlp/attributes/gcp/gcp.go @@ -60,19 +60,19 @@ func HostInfoFromAttrs(attrs pcommon.Map) (hostInfo *HostInfo) { hostInfo = &HostInfo{} if hostID, ok := attrs.Get(string(conventions.HostIDKey)); ok { - hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("instance-id:%s", hostID.Str())) + hostInfo.GCPTags = append(hostInfo.GCPTags, "instance-id:"+hostID.Str()) } if cloudZone, ok := attrs.Get(string(conventions.CloudAvailabilityZoneKey)); ok { - hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("zone:%s", cloudZone.Str())) + hostInfo.GCPTags = append(hostInfo.GCPTags, "zone:"+cloudZone.Str()) } if hostType, ok := attrs.Get(string(conventions.HostTypeKey)); ok { - hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("instance-type:%s", hostType.Str())) + hostInfo.GCPTags = append(hostInfo.GCPTags, "instance-type:"+hostType.Str()) } if 
cloudAccount, ok := attrs.Get(string(conventions.CloudAccountIDKey)); ok { - hostInfo.GCPTags = append(hostInfo.GCPTags, fmt.Sprintf("project:%s", cloudAccount.Str())) + hostInfo.GCPTags = append(hostInfo.GCPTags, "project:"+cloudAccount.Str()) } return diff --git a/pkg/opentelemetry-mapping-go/otlp/logs/orchestrator.go b/pkg/opentelemetry-mapping-go/otlp/logs/orchestrator.go index c38343c76e5576..fd2ff1c3228f69 100644 --- a/pkg/opentelemetry-mapping-go/otlp/logs/orchestrator.go +++ b/pkg/opentelemetry-mapping-go/otlp/logs/orchestrator.go @@ -7,6 +7,7 @@ package logs import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -115,12 +116,12 @@ func BuildManifestFromK8sResource(k8sResource map[string]interface{}, isTerminat // Extract metadata metadata, ok := k8sResource["metadata"].(map[string]interface{}) if !ok || metadata == nil { - return nil, fmt.Errorf("k8s resource missing metadata") + return nil, errors.New("k8s resource missing metadata") } uid, _ := metadata["uid"].(string) if uid == "" { - return nil, fmt.Errorf("k8s resource missing uid in metadata") + return nil, errors.New("k8s resource missing uid in metadata") } resourceVersion, _ := metadata["resourceVersion"].(string) @@ -309,7 +310,7 @@ func CreateClusterManifest(clusterID string, nodes []*agentmodel.Manifest, logge return &agentmodel.Manifest{ Type: int32(getManifestType("Cluster")), - ResourceVersion: fmt.Sprint(version), + ResourceVersion: strconv.FormatUint(version, 10), Uid: clusterID, Content: content, ContentType: "application/json", diff --git a/pkg/opentelemetry-mapping-go/otlp/logs/transform_test.go b/pkg/opentelemetry-mapping-go/otlp/logs/transform_test.go index c110a3f6083176..37a14878188dd8 100644 --- a/pkg/opentelemetry-mapping-go/otlp/logs/transform_test.go +++ b/pkg/opentelemetry-mapping-go/otlp/logs/transform_test.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "net/http" + "strconv" "testing" "github.com/DataDog/datadog-api-client-go/v2/api/datadog" @@ -185,8 +186,8 @@ func 
generateTranslatorTestCases(traceID [16]byte, spanID [8]byte, ddTr uint64, otelSeverityNumber: "5", otelSpanID: fmt.Sprintf("%x", string(spanID[:])), otelTraceID: fmt.Sprintf("%x", string(traceID[:])), - ddSpanID: fmt.Sprintf("%d", ddSp), - ddTraceID: fmt.Sprintf("%d", ddTr), + ddSpanID: strconv.FormatUint(ddSp, 10), + ddTraceID: strconv.FormatUint(ddTr, 10), "service.name": "otlp_col", }, }, @@ -337,8 +338,8 @@ func generateTranslatorTestCases(traceID [16]byte, spanID [8]byte, ddTr uint64, otelSeverityNumber: "5", otelSpanID: fmt.Sprintf("%x", string(spanID[:])), otelTraceID: fmt.Sprintf("%x", string(traceID[:])), - ddSpanID: fmt.Sprintf("%d", ddSp), - ddTraceID: fmt.Sprintf("%d", ddTr), + ddSpanID: strconv.FormatUint(ddSp, 10), + ddTraceID: strconv.FormatUint(ddTr, 10), "service.name": "otlp_col", }, }, @@ -370,8 +371,8 @@ func generateTranslatorTestCases(traceID [16]byte, spanID [8]byte, ddTr uint64, otelSeverityNumber: "13", otelSpanID: fmt.Sprintf("%x", string(spanID[:])), otelTraceID: fmt.Sprintf("%x", string(traceID[:])), - ddSpanID: fmt.Sprintf("%d", ddSp), - ddTraceID: fmt.Sprintf("%d", ddTr), + ddSpanID: strconv.FormatUint(ddSp, 10), + ddTraceID: strconv.FormatUint(ddTr, 10), "service.name": "otlp_col", }, }, @@ -402,8 +403,8 @@ func generateTranslatorTestCases(traceID [16]byte, spanID [8]byte, ddTr uint64, "status": "error", otelSpanID: fmt.Sprintf("%x", string(spanID[:])), otelTraceID: fmt.Sprintf("%x", string(traceID[:])), - ddSpanID: fmt.Sprintf("%d", ddSp), - ddTraceID: fmt.Sprintf("%d", ddTr), + ddSpanID: strconv.FormatUint(ddSp, 10), + ddTraceID: strconv.FormatUint(ddTr, 10), "service.name": "otlp_col", }, }, diff --git a/pkg/opentelemetry-mapping-go/otlp/logs/translator.go b/pkg/opentelemetry-mapping-go/otlp/logs/translator.go index c32db8ff0fb96a..ea719bb2b6bcbd 100644 --- a/pkg/opentelemetry-mapping-go/otlp/logs/translator.go +++ b/pkg/opentelemetry-mapping-go/otlp/logs/translator.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "encoding/json" + 
"errors" "fmt" "io" "net/http" @@ -95,7 +96,7 @@ func (t *Translator) hostFromAttributes(ctx context.Context, attrs pcommon.Map) // MapLogsAndRouteRUMEvents from OTLP format to Datadog format if shouldForwardOTLPRUMToDDRUM is true. func (t *Translator) MapLogsAndRouteRUMEvents(ctx context.Context, ld plog.Logs, hostFromAttributesHandler attributes.HostFromAttributesHandler, shouldForwardOTLPRUMToDDRUM bool, rumIntakeURL string) ([]datadogV2.HTTPLogItem, error) { if t.httpClient == nil { - return nil, fmt.Errorf("httpClient is nil") + return nil, errors.New("httpClient is nil") } rsl := ld.ResourceLogs() diff --git a/pkg/opentelemetry-mapping-go/otlp/metrics/dimensions.go b/pkg/opentelemetry-mapping-go/otlp/metrics/dimensions.go index 9b3d846e480a77..47a9b902ec31eb 100644 --- a/pkg/opentelemetry-mapping-go/otlp/metrics/dimensions.go +++ b/pkg/opentelemetry-mapping-go/otlp/metrics/dimensions.go @@ -139,9 +139,9 @@ func (d *Dimensions) String() string { dimensions := make([]string, len(d.tags)) copy(dimensions, d.tags) - dimensions = append(dimensions, fmt.Sprintf("name:%s", d.name)) - dimensions = append(dimensions, fmt.Sprintf("host:%s", d.host)) - dimensions = append(dimensions, fmt.Sprintf("originID:%s", d.originID)) + dimensions = append(dimensions, "name:"+d.name) + dimensions = append(dimensions, "host:"+d.host) + dimensions = append(dimensions, "originID:"+d.originID) sort.Strings(dimensions) for _, dim := range dimensions { diff --git a/pkg/opentelemetry-mapping-go/otlp/metrics/exponential_histograms_translator.go b/pkg/opentelemetry-mapping-go/otlp/metrics/exponential_histograms_translator.go index 0d870b61bb1414..0e791727213aef 100644 --- a/pkg/opentelemetry-mapping-go/otlp/metrics/exponential_histograms_translator.go +++ b/pkg/opentelemetry-mapping-go/otlp/metrics/exponential_histograms_translator.go @@ -16,6 +16,7 @@ package metrics import ( "context" + "errors" "fmt" "math" @@ -47,7 +48,7 @@ func (t *Translator) exponentialHistogramToDDSketch( delta 
bool, ) (*ddsketch.DDSketch, error) { if !delta { - return nil, fmt.Errorf("cumulative exponential histograms are not supported") + return nil, errors.New("cumulative exponential histograms are not supported") } // Create the DDSketch stores diff --git a/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_remapping_test.go b/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_remapping_test.go index f5a7024b593843..82f35bbdf288f5 100644 --- a/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_remapping_test.go +++ b/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_remapping_test.go @@ -15,7 +15,7 @@ package metrics import ( - "fmt" + "strconv" "strings" "testing" @@ -1027,7 +1027,7 @@ func TestHasAny(t *testing.T) { out: true, }, } { - require.Equal(t, hasAny(p(tt.attrs), tt.tags...), tt.out, fmt.Sprint(i)) + require.Equal(t, hasAny(p(tt.attrs), tt.tags...), tt.out, strconv.Itoa(i)) } } diff --git a/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_translator.go b/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_translator.go index a77c7660618dba..08288478fa6bd4 100644 --- a/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_translator.go +++ b/pkg/opentelemetry-mapping-go/otlp/metrics/metrics_translator.go @@ -367,8 +367,8 @@ func (t *Translator) getSketchBuckets( // otherwise in the case where p.MExplicitBounds() has a size of 1 (eg. [0]), the two buckets // would have the same bucketTags (lower_bound:0 and upper_bound:0), resulting in a buggy behavior. 
bucketDims := pointDims.AddTags( - fmt.Sprintf("lower_bound:%s", formatFloat(lowerBound)), - fmt.Sprintf("upper_bound:%s", formatFloat(upperBound)), + "lower_bound:"+formatFloat(lowerBound), + "upper_bound:"+formatFloat(upperBound), ) // InsertInterpolate doesn't work with an infinite bound; insert in to the bucket that contains the non-infinite bound @@ -464,8 +464,8 @@ func (t *Translator) getLegacyBuckets( for idx := 0; idx < p.BucketCounts().Len(); idx++ { lowerBound, upperBound := getBounds(p.ExplicitBounds(), idx) bucketDims := baseBucketDims.AddTags( - fmt.Sprintf("lower_bound:%s", formatFloat(lowerBound)), - fmt.Sprintf("upper_bound:%s", formatFloat(upperBound)), + "lower_bound:"+formatFloat(lowerBound), + "upper_bound:"+formatFloat(upperBound), ) count := float64(p.BucketCounts().At(idx)) @@ -599,7 +599,7 @@ func formatFloat(f float64) string { // getQuantileTag returns the quantile tag for summary types. func getQuantileTag(quantile float64) string { - return fmt.Sprintf("quantile:%s", formatFloat(quantile)) + return "quantile:" + formatFloat(quantile) } // mapSummaryMetrics maps summary datapoints into Datadog metrics diff --git a/pkg/opentelemetry-mapping-go/otlp/rum/rum.go b/pkg/opentelemetry-mapping-go/otlp/rum/rum.go index e4e6280f732788..33223f5c76f1aa 100644 --- a/pkg/opentelemetry-mapping-go/otlp/rum/rum.go +++ b/pkg/opentelemetry-mapping-go/otlp/rum/rum.go @@ -9,6 +9,7 @@ import ( "crypto/rand" "encoding/binary" "encoding/hex" + "errors" "fmt" "net/url" "strconv" @@ -83,12 +84,12 @@ func ConstructRumPayloadFromOTLP(attr pcommon.Map) map[string]any { func parseIDs(payload map[string]any) (pcommon.TraceID, pcommon.SpanID, error) { ddMetadata, ok := payload["_dd"].(map[string]any) if !ok { - return pcommon.NewTraceIDEmpty(), pcommon.NewSpanIDEmpty(), fmt.Errorf("failed to find _dd metadata in payload") + return pcommon.NewTraceIDEmpty(), pcommon.NewSpanIDEmpty(), errors.New("failed to find _dd metadata in payload") } traceIDString, ok := 
ddMetadata["trace_id"].(string) if !ok { - return pcommon.NewTraceIDEmpty(), pcommon.NewSpanIDEmpty(), fmt.Errorf("failed to retrieve traceID from payload") + return pcommon.NewTraceIDEmpty(), pcommon.NewSpanIDEmpty(), errors.New("failed to retrieve traceID from payload") } traceID, err := strconv.ParseUint(traceIDString, 10, 64) if err != nil { @@ -97,7 +98,7 @@ func parseIDs(payload map[string]any) (pcommon.TraceID, pcommon.SpanID, error) { spanIDString, ok := ddMetadata["span_id"].(string) if !ok { - return pcommon.NewTraceIDEmpty(), pcommon.NewSpanIDEmpty(), fmt.Errorf("failed to retrieve spanID from payload") + return pcommon.NewTraceIDEmpty(), pcommon.NewSpanIDEmpty(), errors.New("failed to retrieve spanID from payload") } spanID, err := strconv.ParseUint(spanIDString, 10, 64) if err != nil { diff --git a/pkg/orchestrator/redact/data_scrubber.go b/pkg/orchestrator/redact/data_scrubber.go index 6275527b784bc7..f84d8827518b98 100644 --- a/pkg/orchestrator/redact/data_scrubber.go +++ b/pkg/orchestrator/redact/data_scrubber.go @@ -107,7 +107,7 @@ func (ds *DataScrubber) ScrubSimpleCommand(cmd, args []string) ([]string, []stri for _, pattern := range ds.RegexSensitivePatterns { if pattern.MatchString(rawCmdline) { regexChanged = true - rawCmdline = pattern.ReplaceAllString(rawCmdline, fmt.Sprintf(`${key}${delimiter}%s`, redactedSecret)) + rawCmdline = pattern.ReplaceAllString(rawCmdline, "${key}${delimiter}"+redactedSecret) } } diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go index 791e212c85167f..337e816cec6dba 100644 --- a/pkg/pidfile/pidfile.go +++ b/pkg/pidfile/pidfile.go @@ -34,7 +34,7 @@ func WritePID(pidFilePath string) error { } // write current pid in it - pidStr := fmt.Sprintf("%d", os.Getpid()) + pidStr := strconv.Itoa(os.Getpid()) if err := os.WriteFile(pidFilePath, []byte(pidStr), 0644); err != nil { return err } diff --git a/pkg/privileged-logs/client/open.go b/pkg/privileged-logs/client/open.go index 4e6d6962e564aa..1e840909929d8d 100644 
--- a/pkg/privileged-logs/client/open.go +++ b/pkg/privileged-logs/client/open.go @@ -59,7 +59,7 @@ func OpenPrivileged(socketPath string, filePath string) (*os.File, error) { unixConn, ok := conn.(*net.UnixConn) if !ok { - return nil, fmt.Errorf("not a Unix connection") + return nil, errors.New("not a Unix connection") } // Read the message and file descriptor using ReadMsgUnix @@ -73,7 +73,7 @@ func OpenPrivileged(socketPath string, filePath string) (*os.File, error) { } if n == 0 { - return nil, fmt.Errorf("no response received") + return nil, errors.New("no response received") } var response common.OpenFileResponse @@ -108,7 +108,7 @@ func OpenPrivileged(socketPath string, filePath string) (*os.File, error) { } } - return nil, fmt.Errorf("no file descriptor received") + return nil, errors.New("no file descriptor received") } // Open attempts to open a file, and if it fails due to permissions, it opens diff --git a/pkg/privileged-logs/module/validate.go b/pkg/privileged-logs/module/validate.go index 63f366af335586..a88434f0d1848f 100644 --- a/pkg/privileged-logs/module/validate.go +++ b/pkg/privileged-logs/module/validate.go @@ -37,7 +37,7 @@ func isTextFile(file *os.File) bool { func validateAndOpenWithPrefix(path, allowedPrefix string) (*os.File, error) { if path == "" { - return nil, fmt.Errorf("empty file path provided") + return nil, errors.New("empty file path provided") } if !filepath.IsAbs(path) { @@ -78,7 +78,7 @@ func validateAndOpenWithPrefix(path, allowedPrefix string) (*os.File, error) { // (expanding symlinks, but protecting against symlink attacks). 
file, err = os.OpenInRoot(allowedPrefix, relativePath) } else { - err = fmt.Errorf("non-log file not allowed") + err = errors.New("non-log file not allowed") } if err != nil { return nil, fmt.Errorf("failed to open file %s: %v", path, err) diff --git a/pkg/process/checks/container.go b/pkg/process/checks/container.go index f85f8164f57fe6..62e09e6ec6de4c 100644 --- a/pkg/process/checks/container.go +++ b/pkg/process/checks/container.go @@ -6,7 +6,6 @@ package checks import ( - "fmt" "math" "net/http" "sync" @@ -154,7 +153,7 @@ func (c *ContainerCheck) Run(nextGroupID func() int32, options *RunOptions) (Run } numContainers := float64(len(containers)) - agentNameTag := fmt.Sprintf("agent:%s", flavor.GetFlavor()) + agentNameTag := "agent:" + flavor.GetFlavor() _ = c.statsd.Gauge("datadog.process.containers.host_count", numContainers, []string{agentNameTag}, 1) log.Debugf("collected %d containers in %s", int(numContainers), time.Since(startTime)) return StandardRunResult(messages), nil diff --git a/pkg/process/checks/net_test.go b/pkg/process/checks/net_test.go index 5a2ee927032a72..435174104a6150 100644 --- a/pkg/process/checks/net_test.go +++ b/pkg/process/checks/net_test.go @@ -36,7 +36,7 @@ func makeConnections(n int) []*model.Connection { conns := make([]*model.Connection, 0) for i := 1; i <= n; i++ { c := makeConnection(int32(i)) - c.Laddr = &model.Addr{ContainerId: fmt.Sprintf("%d", c.Pid)} + c.Laddr = &model.Addr{ContainerId: strconv.Itoa(int(c.Pid))} c.RouteIdx = int32(-1) conns = append(conns, c) @@ -148,7 +148,7 @@ func TestNetworkConnectionBatching(t *testing.T) { assert.Equal(t, "nid", connections.NetworkId) for _, conn := range connections.Connections { assert.Contains(t, connections.ContainerForPid, conn.Pid) - assert.Equal(t, fmt.Sprintf("%d", conn.Pid), connections.ContainerForPid[conn.Pid]) + assert.Equal(t, strconv.Itoa(int(conn.Pid)), connections.ContainerForPid[conn.Pid]) } // ensure only first chunk has telemetry @@ -200,7 +200,7 @@ func 
TestNetworkConnectionBatchingWithDNS(t *testing.T) { assert.Equal(t, "nid", connections.NetworkId) for _, conn := range connections.Connections { assert.Contains(t, connections.ContainerForPid, conn.Pid) - assert.Equal(t, fmt.Sprintf("%d", conn.Pid), connections.ContainerForPid[conn.Pid]) + assert.Equal(t, strconv.Itoa(int(conn.Pid)), connections.ContainerForPid[conn.Pid]) } } assert.Equal(t, 4, total) diff --git a/pkg/process/checks/process.go b/pkg/process/checks/process.go index 34e1372a4b620a..809d34bbeb6bcf 100644 --- a/pkg/process/checks/process.go +++ b/pkg/process/checks/process.go @@ -8,7 +8,6 @@ package checks import ( "crypto/tls" "errors" - "fmt" "math" "net/http" "regexp" @@ -354,7 +353,7 @@ func (p *ProcessCheck) run(groupID int32, collectRealTime bool) (RunResult, erro p.realtimeLastRun = p.lastRun } - agentNameTag := fmt.Sprintf("agent:%s", flavor.GetFlavor()) + agentNameTag := "agent:" + flavor.GetFlavor() _ = p.statsd.Gauge("datadog.process.containers.host_count", float64(totalContainers), []string{agentNameTag}, 1) _ = p.statsd.Gauge("datadog.process.processes.host_count", float64(totalProcs), []string{agentNameTag}, 1) log.Debugf("collected processes in %s", time.Since(start)) diff --git a/pkg/process/checks/process_data_test.go b/pkg/process/checks/process_data_test.go index ce0d0dc4acee08..31febd9a293b32 100644 --- a/pkg/process/checks/process_data_test.go +++ b/pkg/process/checks/process_data_test.go @@ -6,7 +6,7 @@ package checks import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/assert" @@ -69,7 +69,7 @@ func TestProcessDataFetch(t *testing.T) { if tc.wantErr { probe.On("ProcessesByPID", mock.Anything, mock.Anything). - Return(nil, fmt.Errorf("unable to retrieve process data")) + Return(nil, errors.New("unable to retrieve process data")) assert.Error(t, p.Fetch()) } else { probe.On("ProcessesByPID", mock.Anything, mock.Anything). 
diff --git a/pkg/process/checks/process_discovery_check.go b/pkg/process/checks/process_discovery_check.go index 533ee389a2a7ec..17b3ed654e69cc 100644 --- a/pkg/process/checks/process_discovery_check.go +++ b/pkg/process/checks/process_discovery_check.go @@ -6,7 +6,7 @@ package checks import ( - "fmt" + "errors" "time" "github.com/DataDog/datadog-agent/pkg/config/env" @@ -91,7 +91,7 @@ func (d *ProcessDiscoveryCheck) ShouldSaveLastRun() bool { return true } // It is a runtime error to call Run without first having called Init. func (d *ProcessDiscoveryCheck) Run(nextGroupID func() int32, options *RunOptions) (RunResult, error) { if !d.initCalled { - return nil, fmt.Errorf("ProcessDiscoveryCheck.Run called before Init") + return nil, errors.New("ProcessDiscoveryCheck.Run called before Init") } // Does not need to collect process stats, only metadata diff --git a/pkg/process/checks/process_linux_test.go b/pkg/process/checks/process_linux_test.go index 7c81f8a7d7dfc2..6d9542681c4f5e 100644 --- a/pkg/process/checks/process_linux_test.go +++ b/pkg/process/checks/process_linux_test.go @@ -8,8 +8,8 @@ package checks import ( - "fmt" "math/rand/v2" + "strconv" "strings" "testing" "time" @@ -435,7 +435,7 @@ func TestFormatServiceDiscovery(t *testing.T) { func wlmProcessWithCreateTime(pid int32, spaceSeparatedCmdline string, creationTime int64) *wmdef.Process { return &wmdef.Process{ EntityID: wmdef.EntityID{ - ID: fmt.Sprintf("%d", pid), + ID: strconv.Itoa(int(pid)), Kind: wmdef.KindProcess, }, Pid: pid, diff --git a/pkg/process/checks/system_info_darwin.go b/pkg/process/checks/system_info_darwin.go index b365e0aaec3b0c..f3606ee08179b2 100644 --- a/pkg/process/checks/system_info_darwin.go +++ b/pkg/process/checks/system_info_darwin.go @@ -8,7 +8,7 @@ package checks import ( - "fmt" + "errors" model "github.com/DataDog/agent-payload/v5/process" // difference between methods for collecting macOS platform, kernel version @@ -43,7 +43,7 @@ func patchCPUInfo(gopsutilCPUInfo 
[]cpu.InfoStat) ([]cpu.InfoStat, error) { physicalCoreCount := int(cpuInfo.Cores) threadCount, err := macosStatsProvider.getThreadCount() if err != nil { - return nil, fmt.Errorf("could not get thread count") + return nil, errors.New("could not get thread count") } cpuStat := make([]cpu.InfoStat, 0, physicalCoreCount) diff --git a/pkg/process/checks/system_info_windows.go b/pkg/process/checks/system_info_windows.go index d0d3cec950128e..68ccb0a40cd644 100644 --- a/pkg/process/checks/system_info_windows.go +++ b/pkg/process/checks/system_info_windows.go @@ -6,11 +6,11 @@ package checks import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/gohai/cpu" "github.com/DataDog/datadog-agent/pkg/gohai/platform" - "github.com/DataDog/datadog-agent/pkg/util/winutil" model "github.com/DataDog/agent-payload/v5/process" @@ -39,7 +39,7 @@ func CollectSystemInfo() (*model.SystemInfo, error) { // shouldn't be possible, as `cpuInfo.CPUPkgs.Value()` should return an error in this case // but double check before risking a divide by zero if physCount == 0 { - return nil, fmt.Errorf("Returned zero physical processors") + return nil, errors.New("Returned zero physical processors") } cpus := make([]*model.CPUInfo, 0) logicalCountPerPhys := logicalCount / physCount diff --git a/pkg/process/metadata/parser/scm_reader.go b/pkg/process/metadata/parser/scm_reader.go index b368227937fc1a..5503ac8f43d34a 100644 --- a/pkg/process/metadata/parser/scm_reader.go +++ b/pkg/process/metadata/parser/scm_reader.go @@ -8,7 +8,7 @@ package parser import ( - "fmt" + "errors" ) type scmReader struct{} @@ -20,5 +20,5 @@ func newSCMReader() *scmReader { } func (s *scmReader) getServiceInfo(_ uint64) (*WindowsServiceInfo, error) { - return nil, fmt.Errorf("scm service info is only available on windows") + return nil, errors.New("scm service info is only available on windows") } diff --git a/pkg/process/procutil/process_fallback.go b/pkg/process/procutil/process_fallback.go index 
92b10e27997ae9..78712a940984fd 100644 --- a/pkg/process/procutil/process_fallback.go +++ b/pkg/process/procutil/process_fallback.go @@ -8,7 +8,7 @@ package procutil import ( - "fmt" + "errors" "time" // using process.AllProcesses() @@ -47,5 +47,5 @@ func (p *probe) ProcessesByPID(_ time.Time, _ bool) (map[int32]*Process, error) } func (p *probe) StatsWithPermByPID(_ []int32) (map[int32]*StatsWithPerm, error) { - return nil, fmt.Errorf("StatsWithPermByPID is not implemented in this environment") + return nil, errors.New("StatsWithPermByPID is not implemented in this environment") } diff --git a/pkg/process/procutil/process_windows.go b/pkg/process/procutil/process_windows.go index 5f30c7cfd63cad..0f28c8425d9028 100644 --- a/pkg/process/procutil/process_windows.go +++ b/pkg/process/procutil/process_windows.go @@ -9,6 +9,7 @@ package procutil import ( "bytes" + "errors" "fmt" "strings" "time" @@ -294,7 +295,7 @@ func (p *probe) enumCounters(collectMeta bool, collectStats bool) error { } func (p *probe) StatsWithPermByPID(_ []int32) (map[int32]*StatsWithPerm, error) { - return nil, fmt.Errorf("probe(Windows): StatsWithPermByPID is not implemented") + return nil, errors.New("probe(Windows): StatsWithPermByPID is not implemented") } func (p *probe) getProc(instance string) *Process { diff --git a/pkg/process/procutil/process_windows_toolhelp.go b/pkg/process/procutil/process_windows_toolhelp.go index db5f89e98f9a0f..99b2b570b38230 100644 --- a/pkg/process/procutil/process_windows_toolhelp.go +++ b/pkg/process/procutil/process_windows_toolhelp.go @@ -8,7 +8,7 @@ package procutil import ( - "fmt" + "errors" "runtime" "time" "unsafe" @@ -91,7 +91,7 @@ func (p *windowsToolhelpProbe) StatsForPIDs(_ []int32, now time.Time) (map[int32 // StatsWithPermByPID is currently not implemented in non-linux environments func (p *windowsToolhelpProbe) StatsWithPermByPID(_ []int32) (map[int32]*StatsWithPerm, error) { - return nil, fmt.Errorf("windowsToolhelpProbe: StatsWithPermByPID is not 
implemented") + return nil, errors.New("windowsToolhelpProbe: StatsWithPermByPID is not implemented") } func (p *windowsToolhelpProbe) ProcessesByPID(_ time.Time, collectStats bool) (map[int32]*Process, error) { diff --git a/pkg/process/runner/collector_api_test.go b/pkg/process/runner/collector_api_test.go index 7d300e93557cec..a148a958776854 100644 --- a/pkg/process/runner/collector_api_test.go +++ b/pkg/process/runner/collector_api_test.go @@ -7,7 +7,6 @@ package runner import ( - "fmt" "io" "net" "net/http" @@ -508,7 +507,7 @@ func (m *mockEndpoint) start() *url.URL { close(addrC) - collectorEndpoint, err := url.Parse(fmt.Sprintf("http://%s", collectorAddr.String())) + collectorEndpoint, err := url.Parse("http://" + collectorAddr.String()) require.NoError(m.t, err) return collectorEndpoint diff --git a/pkg/process/runner/submitter.go b/pkg/process/runner/submitter.go index c7720644680c74..f8ea334ba35e04 100644 --- a/pkg/process/runner/submitter.go +++ b/pkg/process/runner/submitter.go @@ -420,7 +420,7 @@ func (s *CheckSubmitter) getRequestID(start time.Time, chunkIndex int) string { // Next, we take up to 14 bits to represent the message index in the batch. // It means that we support up to 16384 (2 ^ 14) different messages being sent on the same batch. 
chunk := uint64(chunkIndex & chunkMask) - return fmt.Sprintf("%d", seconds+*s.requestIDCachedHash+chunk) + return strconv.FormatUint(seconds+*s.requestIDCachedHash+chunk, 10) } func (s *CheckSubmitter) shouldDropPayload(check string) bool { @@ -430,8 +430,8 @@ func (s *CheckSubmitter) shouldDropPayload(check string) bool { func (s *CheckSubmitter) heartbeat(heartbeatTicker *clock.Ticker) { agentVersion, _ := version.Agent() tags := []string{ - fmt.Sprintf("version:%s", agentVersion.GetNumberAndPre()), - fmt.Sprintf("revision:%s", agentVersion.Commit), + "version:" + agentVersion.GetNumberAndPre(), + "revision:" + agentVersion.Commit, } for { diff --git a/pkg/proto/pbgo/trace/idx/internal_span.go b/pkg/proto/pbgo/trace/idx/internal_span.go index 362b123f73f527..919e8b147df6ec 100644 --- a/pkg/proto/pbgo/trace/idx/internal_span.go +++ b/pkg/proto/pbgo/trace/idx/internal_span.go @@ -7,6 +7,7 @@ package idx import ( "encoding/binary" + "errors" "fmt" "maps" "strconv" @@ -1100,13 +1101,13 @@ func (attr *AnyValue) AsDoubleValue(strTable *StringTable) (float64, error) { case *AnyValue_IntValue: return float64(v.IntValue), nil case *AnyValue_BytesValue: - return 0, fmt.Errorf("bytes value not a float64") + return 0, errors.New("bytes value not a float64") case *AnyValue_ArrayValue: - return 0, fmt.Errorf("array value not a float64") + return 0, errors.New("array value not a float64") case *AnyValue_KeyValueList: - return 0, fmt.Errorf("key-value list value not a float64") + return 0, errors.New("key-value list value not a float64") default: - return 0, fmt.Errorf("unknown value type not a float64") + return 0, errors.New("unknown value type not a float64") } } diff --git a/pkg/proto/pbgo/trace/utils_test.go b/pkg/proto/pbgo/trace/utils_test.go index 0cf8ba83864066..b89a8edfa4b067 100644 --- a/pkg/proto/pbgo/trace/utils_test.go +++ b/pkg/proto/pbgo/trace/utils_test.go @@ -6,7 +6,6 @@ package trace import ( - fmt "fmt" reflect "reflect" "testing" ) @@ -29,7 +28,7 @@ func 
TestShallowCopy(t *testing.T) { continue } if _, ok := spanCopiedFields[field.Name]; !ok { - panic(fmt.Sprintf("pkg/trace/pb/span_utils.go: ShallowCopy needs to be updated for new Span fields. Missing: %s", field.Name)) + panic("pkg/trace/pb/span_utils.go: ShallowCopy needs to be updated for new Span fields. Missing: " + field.Name) } } }) @@ -49,7 +48,7 @@ func TestShallowCopy(t *testing.T) { continue } if _, ok := traceChunkCopiedFields[field.Name]; !ok { - panic(fmt.Sprintf("pkg/trace/pb/tracer_payload_utils.go: ShallowCopy needs to be updated for new TraceChunk fields. Missing: %s", field.Name)) + panic("pkg/trace/pb/tracer_payload_utils.go: ShallowCopy needs to be updated for new TraceChunk fields. Missing: " + field.Name) } } }) diff --git a/pkg/remoteconfig/state/path.go b/pkg/remoteconfig/state/path.go index d1a4d69e2169b6..8d95857598c6f3 100644 --- a/pkg/remoteconfig/state/path.go +++ b/pkg/remoteconfig/state/path.go @@ -6,6 +6,7 @@ package state import ( + "errors" "fmt" "regexp" "strconv" @@ -61,7 +62,7 @@ func parseDatadogConfigPath(path string) (configPath, error) { } rawProduct := matchedGroups[2] if len(rawProduct) == 0 { - return configPath{}, fmt.Errorf("product is empty") + return configPath{}, errors.New("product is empty") } return configPath{ Source: sourceDatadog, @@ -79,7 +80,7 @@ func parseEmployeeConfigPath(path string) (configPath, error) { } rawProduct := matchedGroups[1] if len(rawProduct) == 0 { - return configPath{}, fmt.Errorf("product is empty") + return configPath{}, errors.New("product is empty") } return configPath{ Source: sourceEmployee, diff --git a/pkg/remoteconfig/state/tuf.go b/pkg/remoteconfig/state/tuf.go index f67ab9c198b132..37c926adda83da 100644 --- a/pkg/remoteconfig/state/tuf.go +++ b/pkg/remoteconfig/state/tuf.go @@ -8,6 +8,7 @@ package state import ( "bytes" "encoding/json" + "errors" "fmt" "io" "strconv" @@ -96,7 +97,7 @@ func (trc *tufRootsClient) validateTargets(rawTargets []byte) (*data.Targets, er } 
targetsRole, hasRoleTargets := root.Roles["targets"] if !hasRoleTargets { - return nil, fmt.Errorf("root is missing a targets role") + return nil, errors.New("root is missing a targets role") } role := &data.Role{Threshold: targetsRole.Threshold, KeyIDs: targetsRole.KeyIDs} if err := db.AddRole("targets", role); err != nil { @@ -182,7 +183,7 @@ func parseMetaPath(rawMetaPath string) (metaPath, error) { func validateTargetFileHash(targetMeta data.TargetFileMeta, targetFile []byte) error { if len(targetMeta.HashAlgorithms()) == 0 { - return fmt.Errorf("target file has no hash") + return errors.New("target file has no hash") } generatedMeta, err := util.GenerateFileMeta(bytes.NewBuffer(targetFile), targetMeta.HashAlgorithms()...) if err != nil { diff --git a/pkg/sbom/bomconvert/convert.go b/pkg/sbom/bomconvert/convert.go index 49b5405cff09f4..5e1237f1320295 100644 --- a/pkg/sbom/bomconvert/convert.go +++ b/pkg/sbom/bomconvert/convert.go @@ -7,7 +7,7 @@ package bomconvert import ( - "fmt" + "strconv" "time" "unsafe" @@ -70,7 +70,7 @@ func (b *bomConvertor) getOrCreateBOMRef(in string) string { } b.bomRefCounter++ - mappedRef := fmt.Sprintf("%d", b.bomRefCounter) + mappedRef := strconv.Itoa(b.bomRefCounter) b.bomRefMapper[in] = mappedRef return mappedRef } diff --git a/pkg/sbom/collectors/containerd/containerd.go b/pkg/sbom/collectors/containerd/containerd.go index ee15251203e9ff..1901d10a056ae3 100644 --- a/pkg/sbom/collectors/containerd/containerd.go +++ b/pkg/sbom/collectors/containerd/containerd.go @@ -10,6 +10,7 @@ package containerd import ( "context" + "errors" "fmt" "github.com/containerd/containerd" @@ -101,7 +102,7 @@ func (c *Collector) Scan(ctx context.Context, request sbom.ScanRequest) sbom.Sca wmeta, ok := c.wmeta.Get() if !ok { - return sbom.ScanResult{Error: fmt.Errorf("workloadmeta store is not initialized")} + return sbom.ScanResult{Error: errors.New("workloadmeta store is not initialized")} } imageMeta, err := wmeta.GetImage(imageID) if err != nil { 
diff --git a/pkg/sbom/collectors/crio/crio.go b/pkg/sbom/collectors/crio/crio.go index 2cb8d47a7da490..898fd8a6122872 100644 --- a/pkg/sbom/collectors/crio/crio.go +++ b/pkg/sbom/collectors/crio/crio.go @@ -9,6 +9,7 @@ package crio import ( "context" + "errors" "fmt" "github.com/DataDog/datadog-agent/comp/core/config" @@ -79,7 +80,7 @@ func (c *Collector) Init(cfg config.Component, wmeta option.Option[workloadmeta. // Scan performs the scan using CRI-O methods func (c *Collector) Scan(ctx context.Context, request sbom.ScanRequest) sbom.ScanResult { if !c.opts.OverlayFsScan { - return sbom.ScanResult{Error: fmt.Errorf("overlayfs direct scan is not enabled, but required to scan CRI-O images")} + return sbom.ScanResult{Error: errors.New("overlayfs direct scan is not enabled, but required to scan CRI-O images")} } imageID := request.ID() @@ -94,7 +95,7 @@ func (c *Collector) Scan(ctx context.Context, request sbom.ScanRequest) sbom.Sca wmeta, ok := c.wmeta.Get() if !ok { - return sbom.ScanResult{Error: fmt.Errorf("workloadmeta store is not initialized")} + return sbom.ScanResult{Error: errors.New("workloadmeta store is not initialized")} } imageMeta, err := wmeta.GetImage(imageID) diff --git a/pkg/sbom/collectors/docker/docker.go b/pkg/sbom/collectors/docker/docker.go index df77aa7fd16a46..7e1d2514ab8102 100644 --- a/pkg/sbom/collectors/docker/docker.go +++ b/pkg/sbom/collectors/docker/docker.go @@ -9,6 +9,7 @@ package docker import ( "context" + "errors" "fmt" "github.com/DataDog/datadog-agent/comp/core/config" @@ -94,7 +95,7 @@ func (c *Collector) Scan(ctx context.Context, request sbom.ScanRequest) sbom.Sca wmeta, ok := c.wmeta.Get() if !ok { - return sbom.ScanResult{Error: fmt.Errorf("workloadmeta store is not initialized")} + return sbom.ScanResult{Error: errors.New("workloadmeta store is not initialized")} } imageMeta, err := wmeta.GetImage(imageID) diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go index 11fbafaba8214d..381b332318b7d2 100644 
--- a/pkg/security/agent/agent.go +++ b/pkg/security/agent/agent.go @@ -312,7 +312,7 @@ func (rsa *RuntimeSecurityAgent) setupGPRC() error { family, socketPath := socket.GetSocketAddress(socketPath) if family == "unix" && runtime.GOOS == "windows" { - return fmt.Errorf("unix sockets are not supported on Windows") + return errors.New("unix sockets are not supported on Windows") } rsa.eventGPRCServer = grpcutils.NewServer(family, socketPath) diff --git a/pkg/security/agent/client.go b/pkg/security/agent/client.go index ee38ca5587b9d5..bf7971910c35d3 100644 --- a/pkg/security/agent/client.go +++ b/pkg/security/agent/client.go @@ -8,6 +8,7 @@ package agent import ( "context" + "errors" "fmt" "net" "runtime" @@ -201,10 +202,10 @@ func NewRuntimeSecurityCmdClient() (*RuntimeSecurityCmdClient, error) { family, cmdSocketPath := socket.GetSocketAddress(cmdSocketPath) if family == "unix" { if runtime.GOOS == "windows" { - return nil, fmt.Errorf("unix sockets are not supported on Windows") + return nil, errors.New("unix sockets are not supported on Windows") } - cmdSocketPath = fmt.Sprintf("unix://%s", cmdSocketPath) + cmdSocketPath = "unix://" + cmdSocketPath } conn, err := grpc.NewClient( @@ -239,10 +240,10 @@ func NewRuntimeSecurityEventClient() (*RuntimeSecurityEventClient, error) { family := socket.GetFamilyAddress(socketPath) if family == "unix" { if runtime.GOOS == "windows" { - return nil, fmt.Errorf("unix sockets are not supported on Windows") + return nil, errors.New("unix sockets are not supported on Windows") } - socketPath = fmt.Sprintf("unix://%s", socketPath) + socketPath = "unix://" + socketPath } opts := []grpc.DialOption{ diff --git a/pkg/security/common/account_id.go b/pkg/security/common/account_id.go index 18ed3063af4bc3..9106c62165d7ef 100644 --- a/pkg/security/common/account_id.go +++ b/pkg/security/common/account_id.go @@ -8,6 +8,7 @@ package common import ( "context" + "errors" "fmt" "sync" "time" @@ -47,7 +48,7 @@ func queryAccountID(ctx 
context.Context) (string, string, error) { } } - return "", "", fmt.Errorf("no cloud provider detected") + return "", "", errors.New("no cloud provider detected") } var accountIDTagCache struct { diff --git a/pkg/security/common/address_utils.go b/pkg/security/common/address_utils.go index 20111b9522f252..2286dc1b75e7a0 100644 --- a/pkg/security/common/address_utils.go +++ b/pkg/security/common/address_utils.go @@ -34,7 +34,7 @@ func GetCmdSocketPath(socketPath string, cmdSocketPath string) (string, error) { family := GetFamilyAddress(socketPath) if family == "unix" { if runtime.GOOS == "windows" { - return "", fmt.Errorf("unix sockets are not supported on Windows") + return "", errors.New("unix sockets are not supported on Windows") } socketDir, socketName := filepath.Split(socketPath) diff --git a/pkg/security/events/rate_limiter.go b/pkg/security/events/rate_limiter.go index b6c239427864ba..5a66e5901aeb00 100644 --- a/pkg/security/events/rate_limiter.go +++ b/pkg/security/events/rate_limiter.go @@ -7,7 +7,6 @@ package events import ( - "fmt" "sync" "time" @@ -151,7 +150,7 @@ func (rl *RateLimiter) GetStats() map[string][]utils.LimiterStat { // for the set of rules func (rl *RateLimiter) SendStats() error { for ruleID, stats := range rl.GetStats() { - ruleIDTag := fmt.Sprintf("rule_id:%s", ruleID) + ruleIDTag := "rule_id:" + ruleID for _, stat := range stats { tags := []string{ruleIDTag} if len(stat.Tags) > 0 { diff --git a/pkg/security/events/token_limiter.go b/pkg/security/events/token_limiter.go index c52069f5201d46..01fbe6a68458ce 100644 --- a/pkg/security/events/token_limiter.go +++ b/pkg/security/events/token_limiter.go @@ -8,6 +8,7 @@ package events import ( "fmt" + "strings" "time" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" @@ -42,20 +43,19 @@ func (tkl *TokenLimiter) genGetTokenFnc(fields []eval.Field) error { } tkl.getToken = func(event Event) string { - var token string + var builder strings.Builder for i, field := range fields 
{ value, err := event.GetFieldValue(field) if err != nil { return "" } - if i == 0 { - token = fmt.Sprintf("%s:%v", field, value) - } else { - token += fmt.Sprintf(";%s:%v", field, value) + if i != 0 { + builder.WriteString(";") } + fmt.Fprintf(&builder, "%s:%v", field, value) } - return token + return builder.String() } return nil diff --git a/pkg/security/generators/accessors/accessors.go b/pkg/security/generators/accessors/accessors.go index 1234334ca08b4c..7b703383541d71 100644 --- a/pkg/security/generators/accessors/accessors.go +++ b/pkg/security/generators/accessors/accessors.go @@ -772,7 +772,7 @@ func sortFieldsByChecks(module *common.Module) { func parseFile(modelFile string, typesFile string, pkgName string) (*common.Module, error) { cfg := packages.Config{ Mode: packages.NeedSyntax | packages.NeedTypes | packages.NeedImports, - BuildFlags: []string{"-mod=readonly", fmt.Sprintf("-tags=%s", buildTags)}, + BuildFlags: []string{"-mod=readonly", "-tags=" + buildTags}, } astFiles, err := newAstFiles(&cfg, modelFile, typesFile) @@ -819,7 +819,7 @@ func formatBuildTags(buildTags string) []string { var formattedBuildTags []string for _, tag := range splittedBuildTags { if tag != "" { - formattedBuildTags = append(formattedBuildTags, fmt.Sprintf("go:build %s", tag)) + formattedBuildTags = append(formattedBuildTags, "go:build "+tag) } } return formattedBuildTags diff --git a/pkg/security/generators/accessors/doc/doc.go b/pkg/security/generators/accessors/doc/doc.go index 3e04d5f820f17a..a5e13c903e6419 100644 --- a/pkg/security/generators/accessors/doc/doc.go +++ b/pkg/security/generators/accessors/doc/doc.go @@ -8,6 +8,7 @@ package doc import ( "encoding/json" + "errors" "fmt" "go/ast" "os" @@ -325,7 +326,7 @@ func parseConstantsFile(filepath string, tags []string) ([]constants, error) { } if len(pkgs) == 0 || len(pkgs[0].Syntax) == 0 { - return nil, fmt.Errorf("couldn't parse constant file") + return nil, errors.New("couldn't parse constant file") } pkg := 
pkgs[0] diff --git a/pkg/security/module/client.go b/pkg/security/module/client.go index 678bed50c1d9f4..eb8d8b4d5be404 100644 --- a/pkg/security/module/client.go +++ b/pkg/security/module/client.go @@ -146,10 +146,10 @@ func NewSecurityAgentAPIClient(cfg *config.RuntimeSecurityConfig) (*SecurityAgen family, socketPath := socket.GetSocketAddress(cfg.SocketPath) if family == "unix" { if runtime.GOOS == "windows" { - return nil, fmt.Errorf("unix sockets are not supported on Windows") + return nil, errors.New("unix sockets are not supported on Windows") } - socketPath = fmt.Sprintf("unix://%s", socketPath) + socketPath = "unix://" + socketPath } opts := []grpc.DialOption{ diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index c8715a212c3819..b1ecc8efdb5883 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -302,9 +302,9 @@ func (c *CWSConsumer) reportSelfTest(success []eval.RuleID, fails []eval.RuleID) tags := []string{ fmt.Sprintf("success:%d", len(success)), fmt.Sprintf("fails:%d", len(fails)), - fmt.Sprintf("os:%s", runtime.GOOS), - fmt.Sprintf("arch:%s", utils.RuntimeArch()), - fmt.Sprintf("origin:%s", c.probe.Origin()), + "os:" + runtime.GOOS, + "arch:" + utils.RuntimeArch(), + "origin:" + c.probe.Origin(), } if err := c.statsdClient.Gauge(metrics.MetricSelfTest, 1.0, tags, 1.0); err != nil { seclog.Errorf("failed to send self_test metric: %s", err) @@ -378,8 +378,8 @@ func (c *CWSConsumer) sendStats() { for statsTags, counter := range c.ruleEngine.AutoSuppression.GetStats() { if counter > 0 { tags := []string{ - fmt.Sprintf("rule_id:%s", statsTags.RuleID), - fmt.Sprintf("suppression_type:%s", statsTags.SuppressionType), + "rule_id:" + statsTags.RuleID, + "suppression_type:" + statsTags.SuppressionType, } _ = c.statsdClient.Count(metrics.MetricRulesSuppressed, counter, tags, 1.0) } diff --git a/pkg/security/module/server.go b/pkg/security/module/server.go index 6a850166ec4c67..1ebd10548067d4 100644 --- 
a/pkg/security/module/server.go +++ b/pkg/security/module/server.go @@ -567,7 +567,7 @@ func (a *APIServer) getStats() map[string]int64 { func (a *APIServer) SendStats() error { // statistics about the number of dropped events for ruleID, val := range a.getStats() { - tags := []string{fmt.Sprintf("rule_id:%s", ruleID)} + tags := []string{"rule_id:" + ruleID} if val > 0 { if err := a.statsdClient.Count(metrics.MetricEventServerExpired, val, tags, 1.0); err != nil { return err @@ -603,7 +603,7 @@ func (a *APIServer) GetRuleSetReport(_ context.Context, _ *api.GetRuleSetReportP ruleSet := a.cwsConsumer.ruleEngine.GetRuleSet() if ruleSet == nil { - return nil, fmt.Errorf("failed to get loaded rule set") + return nil, errors.New("failed to get loaded rule set") } cfg := &pconfig.Config{ diff --git a/pkg/security/module/server_linux.go b/pkg/security/module/server_linux.go index ac992bdceadaac..af310ece92525d 100644 --- a/pkg/security/module/server_linux.go +++ b/pkg/security/module/server_linux.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "os" + "strconv" "github.com/DataDog/datadog-agent/pkg/security/probe" "github.com/DataDog/datadog-agent/pkg/security/proto/api" @@ -36,7 +37,7 @@ func (a *APIServer) DumpDiscarders(_ context.Context, _ *api.DumpDiscardersParam func (a *APIServer) DumpProcessCache(_ context.Context, params *api.DumpProcessCacheParams) (*api.SecurityDumpProcessCacheMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } var ( @@ -83,7 +84,7 @@ func (a *APIServer) DumpProcessCache(_ context.Context, params *api.DumpProcessC func (a *APIServer) DumpActivity(_ context.Context, params *api.ActivityDumpParams) (*api.ActivityDumpMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } if manager := p.GetProfileManager(); manager != nil { @@ -94,14 
+95,14 @@ func (a *APIServer) DumpActivity(_ context.Context, params *api.ActivityDumpPara return msg, nil } - return nil, fmt.Errorf("monitor not configured") + return nil, errors.New("monitor not configured") } // ListActivityDumps returns the list of active dumps func (a *APIServer) ListActivityDumps(_ context.Context, params *api.ActivityDumpListParams) (*api.ActivityDumpListMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } if manager := p.GetProfileManager(); manager != nil { @@ -112,14 +113,14 @@ func (a *APIServer) ListActivityDumps(_ context.Context, params *api.ActivityDum return msg, nil } - return nil, fmt.Errorf("monitor not configured") + return nil, errors.New("monitor not configured") } // StopActivityDump stops an active activity dump if it exists func (a *APIServer) StopActivityDump(_ context.Context, params *api.ActivityDumpStopParams) (*api.ActivityDumpStopMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } if manager := p.GetProfileManager(); manager != nil { @@ -130,14 +131,14 @@ func (a *APIServer) StopActivityDump(_ context.Context, params *api.ActivityDump return msg, nil } - return nil, fmt.Errorf("monitor not configured") + return nil, errors.New("monitor not configured") } // TranscodingRequest encodes an activity dump following the requested parameters func (a *APIServer) TranscodingRequest(_ context.Context, params *api.TranscodingRequestParams) (*api.TranscodingRequestMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } if manager := p.GetProfileManager(); manager != nil { @@ -148,14 +149,14 @@ func (a *APIServer) TranscodingRequest(_ context.Context, params *api.Transcodin return msg, nil } - return nil, 
fmt.Errorf("monitor not configured") + return nil, errors.New("monitor not configured") } // ListSecurityProfiles returns the list of security profiles func (a *APIServer) ListSecurityProfiles(_ context.Context, params *api.SecurityProfileListParams) (*api.SecurityProfileListMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } if manager := p.GetProfileManager(); manager != nil { @@ -166,14 +167,14 @@ func (a *APIServer) ListSecurityProfiles(_ context.Context, params *api.Security return msg, nil } - return nil, fmt.Errorf("monitor not configured") + return nil, errors.New("monitor not configured") } // SaveSecurityProfile saves the requested security profile to disk func (a *APIServer) SaveSecurityProfile(_ context.Context, params *api.SecurityProfileSaveParams) (*api.SecurityProfileSaveMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } if manager := p.GetProfileManager(); manager != nil { @@ -184,7 +185,7 @@ func (a *APIServer) SaveSecurityProfile(_ context.Context, params *api.SecurityP return msg, nil } - return nil, fmt.Errorf("monitor not configured") + return nil, errors.New("monitor not configured") } func (a *APIServer) fillStatusPlatform(apiStatus *api.Status) error { @@ -227,7 +228,7 @@ func (a *APIServer) fillStatusPlatform(apiStatus *api.Status) error { func (a *APIServer) DumpNetworkNamespace(_ context.Context, params *api.DumpNetworkNamespaceParams) (*api.DumpNetworkNamespaceMessage, error) { p, ok := a.probe.PlatformProbe.(*probe.EBPFProbe) if !ok { - return nil, fmt.Errorf("not supported") + return nil, errors.New("not supported") } return p.Resolvers.NamespaceResolver.DumpNetworkNamespaces(params), nil @@ -284,7 +285,7 @@ func createSSHSessionPatcher(ev *model.Event, p *probe.Probe) sshSessionPatcher } // Create the user session 
context serializer userSessionCtx := &serializers.SSHSessionContextSerializer{ - SSHSessionID: fmt.Sprintf("%x", ev.ProcessContext.UserSession.SSHSessionID), + SSHSessionID: strconv.FormatUint(uint64(ev.ProcessContext.UserSession.SSHSessionID), 16), SSHClientPort: ev.ProcessContext.UserSession.SSHClientPort, SSHClientIP: ev.ProcessContext.UserSession.SSHClientIP.IP.String(), } diff --git a/pkg/security/probe/constantfetch/available.go b/pkg/security/probe/constantfetch/available.go index b907d9301b21e3..994dfc49617f1c 100644 --- a/pkg/security/probe/constantfetch/available.go +++ b/pkg/security/probe/constantfetch/available.go @@ -156,7 +156,7 @@ func checkAttachFuncProtoBpfMapEmbedStruct(spec *btf.Spec) (bool, error) { ty, ok := member.Type.(*btf.Struct) if !ok { - return false, fmt.Errorf("bpf_map.owner is not a struct") + return false, errors.New("bpf_map.owner is not a struct") } for _, ownerMember := range ty.Members { diff --git a/pkg/security/probe/constantfetch/btfhub.go b/pkg/security/probe/constantfetch/btfhub.go index 3558e83a3a663f..ba118bebdc935a 100644 --- a/pkg/security/probe/constantfetch/btfhub.go +++ b/pkg/security/probe/constantfetch/btfhub.go @@ -11,6 +11,7 @@ package constantfetch import ( _ "embed" // for go:embed "encoding/json" + "errors" "fmt" "runtime" "strings" @@ -109,12 +110,12 @@ type kernelInfos struct { func newKernelInfos(kv *kernel.Version) (*kernelInfos, error) { distribution, ok := kv.OsRelease["ID"] if !ok { - return nil, fmt.Errorf("failed to collect os-release ID") + return nil, errors.New("failed to collect os-release ID") } version, ok := kv.OsRelease["VERSION_ID"] if !ok { - return nil, fmt.Errorf("failed to collect os-release VERSION_ID") + return nil, errors.New("failed to collect os-release VERSION_ID") } // HACK: fix mapping of version for oracle-linux and amazon linux 2018 @@ -127,7 +128,7 @@ func newKernelInfos(kv *kernel.Version) (*kernelInfos, error) { arch, ok := archMapping[runtime.GOARCH] if !ok { - return nil, 
fmt.Errorf("failed to map runtime arch to btf arch") + return nil, errors.New("failed to map runtime arch to btf arch") } return &kernelInfos{ diff --git a/pkg/security/probe/constantfetch/btfhub/main.go b/pkg/security/probe/constantfetch/btfhub/main.go index 5d7849b3dedcfa..7dc8c842b648b8 100644 --- a/pkg/security/probe/constantfetch/btfhub/main.go +++ b/pkg/security/probe/constantfetch/btfhub/main.go @@ -13,6 +13,7 @@ import ( "bytes" "cmp" "crypto/sha256" + "encoding/hex" "encoding/json" "errors" "flag" @@ -420,5 +421,5 @@ outer: func computeCacheKey(b []byte) string { h := sha256.New() h.Write(b) - return fmt.Sprintf("%x", h.Sum(nil)) + return hex.EncodeToString(h.Sum(nil)) } diff --git a/pkg/security/probe/constantfetch/tracepoints.go b/pkg/security/probe/constantfetch/tracepoints.go index 76dc18ce5c165e..0b9cc204db9b80 100644 --- a/pkg/security/probe/constantfetch/tracepoints.go +++ b/pkg/security/probe/constantfetch/tracepoints.go @@ -38,7 +38,7 @@ func ReadTracepointFieldOffset(tracepoint string, field string) (uint64, error) } defer format.Close() - spaceField := fmt.Sprintf(" %s", field) + spaceField := " " + field scanner := bufio.NewScanner(format) for scanner.Scan() { diff --git a/pkg/security/probe/eventstream/monitor.go b/pkg/security/probe/eventstream/monitor.go index 873da1d52940f7..2bf3a2e8c7154e 100644 --- a/pkg/security/probe/eventstream/monitor.go +++ b/pkg/security/probe/eventstream/monitor.go @@ -10,6 +10,7 @@ package eventstream import ( "encoding/binary" + "errors" "fmt" "github.com/DataDog/datadog-go/v5/statsd" @@ -484,7 +485,7 @@ func (pbm *Monitor) sendEventsAndBytesReadStats(client statsd.ClientInterface) e tags := []string{pbm.config.StatsTagsCardinality, "", "", ""} for m := range pbm.eventStats { - tags[1] = fmt.Sprintf("map:%s", m) + tags[1] = "map:" + m for cpu := range pbm.eventStats[m] { for eventType := range pbm.eventStats[m][cpu] { evtType := model.EventType(eventType) @@ -528,7 +529,7 @@ func (pbm *Monitor) 
sendEventsAndBytesReadStats(client statsd.ClientInterface) e for mapName, causes := range pbm.invalidEventStats { for cause, stats := range causes { count, bytes := stats.getAndReset() - tags := []string{fmt.Sprintf("map:%s", mapName), fmt.Sprintf("cause:%s", InvalidEventCause(cause).String())} + tags := []string{"map:" + mapName, "cause:" + InvalidEventCause(cause).String()} if count > 0 { if err := client.Count(metrics.MetricPerfBufferInvalidEventsCount, int64(count), tags, 1.0); err != nil { return err @@ -550,7 +551,7 @@ func (pbm *Monitor) sendLostEventsReadStats(client statsd.ClientInterface) error for m := range pbm.readLostEvents { var total float64 - tags[1] = fmt.Sprintf("map:%s", m) + tags[1] = "map:" + m for cpu := range pbm.readLostEvents[m] { if count := float64(pbm.getAndResetReadLostCount(m, cpu)); count > 0 { @@ -572,7 +573,7 @@ func (pbm *Monitor) getRingbufUsage(statsMap *statMap) (uint64, error) { var ringUsage uint64 if err := statsMap.ebpfRingBufferMap.Lookup(int32(0), &ringUsage); err != nil { - return 0, fmt.Errorf("failed to retrieve ring buffer usage") + return 0, errors.New("failed to retrieve ring buffer usage") } return ringUsage, nil @@ -598,7 +599,7 @@ func (pbm *Monitor) collectAndSendKernelStats(client statsd.ClientInterface) err // total and perEvent are used for alerting var total uint64 perEvent := map[string]uint64{} - mapNameTag := fmt.Sprintf("map:%s", perfMapName) + mapNameTag := "map:" + perfMapName tags[1] = mapNameTag // loop through all the values of the active buffer diff --git a/pkg/security/probe/field_handlers_ebpf.go b/pkg/security/probe/field_handlers_ebpf.go index 9d431f8c92fd6f..6d2e7cf1fc961b 100644 --- a/pkg/security/probe/field_handlers_ebpf.go +++ b/pkg/security/probe/field_handlers_ebpf.go @@ -11,11 +11,13 @@ package probe import ( "crypto/sha256" "encoding/binary" + "encoding/hex" "fmt" "net" "net/netip" "path" "slices" + "strconv" "strings" "syscall" "time" @@ -978,7 +980,7 @@ func (fh *EBPFFieldHandlers) 
ResolveSetSockOptFilterHash(_ *model.Event, e *mode h := sha256.New() h.Write(e.RawFilter) bs := h.Sum(nil) - e.FilterHash = fmt.Sprintf("%x", bs) + e.FilterHash = hex.EncodeToString(bs) return e.FilterHash } return e.FilterHash @@ -1072,9 +1074,9 @@ func (fh *EBPFFieldHandlers) ResolveSessionID(e *model.Event, evtCtx *model.User fh.ResolveK8SUserSessionContext(e, &evtCtx.K8SSessionContext) var sessionID string if evtCtx.K8SSessionID != 0 { - sessionID = fmt.Sprintf("%x", evtCtx.K8SSessionID) + sessionID = strconv.FormatUint(uint64(evtCtx.K8SSessionID), 16) } else if evtCtx.SSHSessionID != 0 { - sessionID = fmt.Sprintf("%x", evtCtx.SSHSessionID) + sessionID = strconv.FormatUint(uint64(evtCtx.SSHSessionID), 16) } else { sessionID = "" } diff --git a/pkg/security/probe/field_handlers_ebpfless.go b/pkg/security/probe/field_handlers_ebpfless.go index 2767fe9952181f..1e7cd92f930f50 100644 --- a/pkg/security/probe/field_handlers_ebpfless.go +++ b/pkg/security/probe/field_handlers_ebpfless.go @@ -10,6 +10,7 @@ package probe import ( "crypto/sha256" + "encoding/hex" "fmt" "net" "slices" @@ -569,7 +570,7 @@ func (fh *EBPFLessFieldHandlers) ResolveSetSockOptFilterHash(_ *model.Event, e * h := sha256.New() h.Write(e.RawFilter) bs := h.Sum(nil) - e.FilterHash = fmt.Sprintf("%x", bs) + e.FilterHash = hex.EncodeToString(bs) return e.FilterHash } return e.FilterHash diff --git a/pkg/security/probe/monitors/approver/approver_monitor.go b/pkg/security/probe/monitors/approver/approver_monitor.go index 2c39fdf3b6a855..05e49b14141a39 100644 --- a/pkg/security/probe/monitors/approver/approver_monitor.go +++ b/pkg/security/probe/monitors/approver/approver_monitor.go @@ -71,7 +71,7 @@ func (d *Monitor) SendStats() error { } for eventType, stats := range statsByEventType { - eventTypeTag := fmt.Sprintf("event_type:%s", model.EventType(eventType).String()) + eventTypeTag := "event_type:" + model.EventType(eventType).String() categoryTag := fmt.Sprintf("category:%s", 
model.GetEventTypeCategory(model.EventType(eventType).String())) if stats.EventRejected != 0 { tagsForRejectedEvents := []string{ diff --git a/pkg/security/probe/monitors/discarder/discarder_monitor.go b/pkg/security/probe/monitors/discarder/discarder_monitor.go index b6200a45e9db35..baa53d5cfe109a 100644 --- a/pkg/security/probe/monitors/discarder/discarder_monitor.go +++ b/pkg/security/probe/monitors/discarder/discarder_monitor.go @@ -71,7 +71,7 @@ func (d *Monitor) SendStats() error { } else { tags = []string{ "discarder_type:event", - fmt.Sprintf("event_type:%s", model.EventType(eventType).String()), + "event_type:" + model.EventType(eventType).String(), } } diff --git a/pkg/security/probe/monitors/syscalls/syscalls_monitor.go b/pkg/security/probe/monitors/syscalls/syscalls_monitor.go index 6002bc401a0f7c..139bdee4513ca3 100644 --- a/pkg/security/probe/monitors/syscalls/syscalls_monitor.go +++ b/pkg/security/probe/monitors/syscalls/syscalls_monitor.go @@ -51,7 +51,7 @@ func (d *Monitor) SendStats() error { } for eventType, inflight := range statsByEventType { - eventTypeTag := fmt.Sprintf("event_type:%s", model.EventType(eventType).String()) + eventTypeTag := "event_type:" + model.EventType(eventType).String() tagsEvents := []string{ eventTypeTag, } diff --git a/pkg/security/probe/probe.go b/pkg/security/probe/probe.go index 2c7ef752f3d7b0..20730af4b25547 100644 --- a/pkg/security/probe/probe.go +++ b/pkg/security/probe/probe.go @@ -166,7 +166,7 @@ func (p *Probe) sendConsumerStats() error { dropped := consumer.eventDropped.Swap(0) if dropped > 0 { tags := []string{ - fmt.Sprintf("consumer_id:%s", consumer.consumer.ID()), + "consumer_id:" + consumer.consumer.ID(), } if err := p.StatsdClient.Count(metrics.MetricEventMonitoringEventsDropped, dropped, tags, 1.0); err != nil { return err @@ -188,8 +188,8 @@ func (p *Probe) SendStats() error { count := counter.Swap(0) if count > 0 { tags := []string{ - fmt.Sprintf("rule_id:%s", tags.ruleID), - 
fmt.Sprintf("action_name:%s", tags.actionName), + "rule_id:" + tags.ruleID, + "action_name:" + tags.actionName, } _ = p.StatsdClient.Count(metrics.MetricRuleActionPerformed, count, tags, 1.0) } diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index c65817cc94949f..e8e619299ff374 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -1896,8 +1896,8 @@ func (p *EBPFProbe) setApprovers(eventType eval.EventType, approvers rules.Appro for tags, count := range approverAddedMetricCounter { tags := []string{ - fmt.Sprintf("approver_type:%s", tags.approverType), - fmt.Sprintf("event_type:%s", tags.eventType), + "approver_type:" + tags.approverType, + "event_type:" + tags.eventType, } if err := p.statsdClient.Gauge(metrics.MetricApproverAdded, count, tags, 1.0); err != nil { diff --git a/pkg/security/probe/probe_kernel_file_windows.go b/pkg/security/probe/probe_kernel_file_windows.go index 310f72442020f1..2d5fdf68ac7018 100644 --- a/pkg/security/probe/probe_kernel_file_windows.go +++ b/pkg/security/probe/probe_kernel_file_windows.go @@ -819,7 +819,7 @@ func (wp *WindowsProbe) convertDrivePath(devicefilename string) (string, error) func (wp *WindowsProbe) mustConvertDrivePath(devicefilename string) (string, error) { if devicefilename == "\\FI_UNKNOWN" { - return "", fmt.Errorf("unknown device filename") + return "", errors.New("unknown device filename") } userPath, err := wp.convertDrivePath(devicefilename) diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index d4a58afb5847ec..aefad4d5be32c0 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -10,6 +10,7 @@ package probe import ( "context" + "errors" "fmt" "slices" "sync" @@ -252,7 +253,7 @@ func (p *ProcessKiller) isKillAllowed(kcs []killContext) (bool, error) { p.Lock() if !p.enabled { p.Unlock() - return false, fmt.Errorf("the enforcement capability is disabled") + return 
false, errors.New("the enforcement capability is disabled") } p.Unlock() diff --git a/pkg/security/probe/procfs/snapshot_bound_sockets.go b/pkg/security/probe/procfs/snapshot_bound_sockets.go index 92642bb720b012..3e6ff30f568211 100644 --- a/pkg/security/probe/procfs/snapshot_bound_sockets.go +++ b/pkg/security/probe/procfs/snapshot_bound_sockets.go @@ -56,7 +56,7 @@ func (bss *BoundSocketSnapshotter) GetBoundSockets(p *process.Process) ([]model. } } - link, err := os.Readlink(kernel.HostProc(fmt.Sprintf("%d", p.Pid), "ns/net")) + link, err := os.Readlink(kernel.HostProc(strconv.Itoa(int(p.Pid)), "ns/net")) if err != nil { return nil, err } @@ -178,7 +178,7 @@ type netIPEntry struct { } func parseNetIP(pid int32, suffix string) ([]netIPEntry, error) { - path := kernel.HostProc(fmt.Sprintf("%d", pid), suffix) + path := kernel.HostProc(strconv.Itoa(int(pid)), suffix) f, err := os.Open(path) if err != nil { return nil, err diff --git a/pkg/security/probe/selftests/chmod.go b/pkg/security/probe/selftests/chmod.go index 70c31e0d40f026..9215dc48208453 100644 --- a/pkg/security/probe/selftests/chmod.go +++ b/pkg/security/probe/selftests/chmod.go @@ -27,7 +27,7 @@ type ChmodSelfTest struct { // GetRuleDefinition returns the rule func (o *ChmodSelfTest) GetRuleDefinition() *rules.RuleDefinition { - o.ruleID = fmt.Sprintf("%s_chmod", ruleIDPrefix) + o.ruleID = ruleIDPrefix + "_chmod" return &rules.RuleDefinition{ ID: o.ruleID, diff --git a/pkg/security/probe/selftests/chown.go b/pkg/security/probe/selftests/chown.go index 71f0a497149137..86e3f754e53920 100644 --- a/pkg/security/probe/selftests/chown.go +++ b/pkg/security/probe/selftests/chown.go @@ -28,7 +28,7 @@ type ChownSelfTest struct { // GetRuleDefinition returns the rule func (o *ChownSelfTest) GetRuleDefinition() *rules.RuleDefinition { - o.ruleID = fmt.Sprintf("%s_chown", ruleIDPrefix) + o.ruleID = ruleIDPrefix + "_chown" return &rules.RuleDefinition{ ID: o.ruleID, diff --git 
a/pkg/security/probe/selftests/create_file_windows.go b/pkg/security/probe/selftests/create_file_windows.go index 5a0ce80cbb1bbc..5f7a817f67bfd5 100644 --- a/pkg/security/probe/selftests/create_file_windows.go +++ b/pkg/security/probe/selftests/create_file_windows.go @@ -26,7 +26,7 @@ type WindowsCreateFileSelfTest struct { // GetRuleDefinition returns the rule func (o *WindowsCreateFileSelfTest) GetRuleDefinition() *rules.RuleDefinition { - o.ruleID = fmt.Sprintf("%s_windows_create_file", ruleIDPrefix) + o.ruleID = ruleIDPrefix + "_windows_create_file" basename := filepath.Base(o.filename) devicePath := o.filename diff --git a/pkg/security/probe/selftests/ebpfless.go b/pkg/security/probe/selftests/ebpfless.go index a6737c3fcc6a36..78708f3949b648 100644 --- a/pkg/security/probe/selftests/ebpfless.go +++ b/pkg/security/probe/selftests/ebpfless.go @@ -10,7 +10,6 @@ package selftests import ( "context" - "fmt" "math" "time" @@ -26,7 +25,7 @@ type EBPFLessSelfTest struct { // GetRuleDefinition returns the rule func (o *EBPFLessSelfTest) GetRuleDefinition() *rules.RuleDefinition { - o.ruleID = fmt.Sprintf("%s_exec", ruleIDPrefix) + o.ruleID = ruleIDPrefix + "_exec" return &rules.RuleDefinition{ ID: o.ruleID, diff --git a/pkg/security/probe/selftests/open.go b/pkg/security/probe/selftests/open.go index 45d58357b19d51..4f934fadf5ad98 100644 --- a/pkg/security/probe/selftests/open.go +++ b/pkg/security/probe/selftests/open.go @@ -27,7 +27,7 @@ type OpenSelfTest struct { // GetRuleDefinition returns the rule func (o *OpenSelfTest) GetRuleDefinition() *rules.RuleDefinition { - o.ruleID = fmt.Sprintf("%s_open", ruleIDPrefix) + o.ruleID = ruleIDPrefix + "_open" return &rules.RuleDefinition{ ID: o.ruleID, diff --git a/pkg/security/probe/selftests/open_registry_key_windows.go b/pkg/security/probe/selftests/open_registry_key_windows.go index 916a59c1458a48..28f0551ddd2c30 100644 --- a/pkg/security/probe/selftests/open_registry_key_windows.go +++ 
b/pkg/security/probe/selftests/open_registry_key_windows.go @@ -27,7 +27,7 @@ type WindowsOpenRegistryKeyTest struct { // GetRuleDefinition returns the rule func (o *WindowsOpenRegistryKeyTest) GetRuleDefinition() *rules.RuleDefinition { - o.ruleID = fmt.Sprintf("%s_windows_open_registry_key_name", ruleIDPrefix) + o.ruleID = ruleIDPrefix + "_windows_open_registry_key_name" return &rules.RuleDefinition{ ID: o.ruleID, diff --git a/pkg/security/probe/selftests/tester.go b/pkg/security/probe/selftests/tester.go index 7086f1b4d20596..ab50be37dbc7cb 100644 --- a/pkg/security/probe/selftests/tester.go +++ b/pkg/security/probe/selftests/tester.go @@ -8,7 +8,7 @@ package selftests import ( "context" - "fmt" + "errors" "os" "sync" "time" @@ -234,7 +234,7 @@ func (t *SelfTester) beginSelfTests(timeout time.Duration) error { select { case t.selfTestRunning <- timeout: default: - return fmt.Errorf("channel is already full, self test is already running") + return errors.New("channel is already full, self test is already running") } t.waitingForEvent.Store(true) diff --git a/pkg/security/probe/selftests/tester_windows.go b/pkg/security/probe/selftests/tester_windows.go index e73b80d279f168..3eacea809a9117 100644 --- a/pkg/security/probe/selftests/tester_windows.go +++ b/pkg/security/probe/selftests/tester_windows.go @@ -7,7 +7,7 @@ package selftests import ( - "fmt" + "errors" "path/filepath" "time" @@ -23,7 +23,7 @@ import ( func NewSelfTester(cfg *config.RuntimeSecurityConfig, probe *probe.Probe) (*SelfTester, error) { if !cfg.FIMEnabled { - return nil, fmt.Errorf("FIM is disabled") + return nil, errors.New("FIM is disabled") } var ( selfTests []SelfTest diff --git a/pkg/security/probe/sysctl/snapshot.go b/pkg/security/probe/sysctl/snapshot.go index 2e1e456f3e7333..d7971c9421f460 100644 --- a/pkg/security/probe/sysctl/snapshot.go +++ b/pkg/security/probe/sysctl/snapshot.go @@ -13,6 +13,7 @@ import ( "bytes" "compress/gzip" "encoding/json" + "errors" "fmt" "io" "io/fs" @@ -264,7 
+265,7 @@ func (s *Snapshot) getKernelConfigPath() (string, error) { if _, err := os.Stat(procConfigGZ); err == nil { return procConfigGZ, nil } - return "", fmt.Errorf("kernel config not found") + return "", errors.New("kernel config not found") } func (s *Snapshot) parseKernelConfig(r io.Reader, kernelCompilationFlags map[string]uint8) error { diff --git a/pkg/security/process_list/process_list.go b/pkg/security/process_list/process_list.go index c50127a47abc5a..2548893d827fc6 100644 --- a/pkg/security/process_list/process_list.go +++ b/pkg/security/process_list/process_list.go @@ -167,11 +167,11 @@ func (pl *ProcessList) isEventValid(event *model.Event) (bool, error) { case model.IMDSEventType: // ignore IMDS answers without AccessKeyIDS if event.IMDS.Type == model.IMDSResponseType && len(event.IMDS.AWS.SecurityCredentials.AccessKeyID) == 0 { - return false, fmt.Errorf("untraced event: IMDS response without credentials") + return false, errors.New("untraced event: IMDS response without credentials") } // ignore IMDS requests without URLs if event.IMDS.Type == model.IMDSRequestType && len(event.IMDS.URL) == 0 { - return false, fmt.Errorf("invalid event: IMDS request without any URL") + return false, errors.New("invalid event: IMDS request without any URL") } } return true, nil diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 53cef064a2eaf4..73dedcfccfeb1c 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -212,7 +212,7 @@ func (ctx *CWSPtracerCtx) waitClientToBeReady() error { for { select { case <-ctx.cancel.Done(): - return fmt.Errorf("Exiting") + return errors.New("Exiting") case ready := <-ctx.clientReady: if !ready { time.Sleep(time.Second) @@ -244,7 +244,7 @@ func (ctx *CWSPtracerCtx) sendMessagesLoop() error { for { select { case <-ctx.cancel.Done(): - return fmt.Errorf("Exiting") + return errors.New("Exiting") case data := <-ctx.msgDataChan: if err := ctx.sendMsgData(data); err != nil { 
logger.Debugf("error sending msg: %v", err) @@ -333,7 +333,7 @@ func (ctx *CWSPtracerCtx) initCtxCommon() error { func initCWSPtracerWrapp(args []string, envs []string, probeAddr string, opts Opts) (*CWSPtracerCtx, error) { if len(args) == 0 { - return nil, fmt.Errorf("an executable is required") + return nil, errors.New("an executable is required") } entry, err := checkEntryPoint(args[0]) if err != nil { diff --git a/pkg/security/resolvers/file/elf.go b/pkg/security/resolvers/file/elf.go index 991915947784ca..703444c361be0b 100644 --- a/pkg/security/resolvers/file/elf.go +++ b/pkg/security/resolvers/file/elf.go @@ -6,7 +6,7 @@ package file import ( - "fmt" + "errors" "os" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -20,7 +20,7 @@ func isELF(header []byte, fileSize int64) bool { func getELFInfoFromHeader(header []byte) (model.ABI, model.Architecture, error) { if len(header) < 20 { - return model.UnknownABI, model.UnknownArch, fmt.Errorf("header too short") + return model.UnknownABI, model.UnknownArch, errors.New("header too short") } // Get ABI from EI_CLASS (byte 4) @@ -42,7 +42,7 @@ func getELFInfoFromHeader(header []byte) (model.ABI, model.Architecture, error) case 2: // ELFDATA2MSB (big-endian) machine = uint16(header[18])<<8 | uint16(header[19]) default: - return abi, model.UnknownArch, fmt.Errorf("unknown endianness") + return abi, model.UnknownArch, errors.New("unknown endianness") } // Get architecture from e_machine (bytes 18-19) diff --git a/pkg/security/resolvers/file/pe.go b/pkg/security/resolvers/file/pe.go index d5ffb2277b5f46..69514673b0df18 100644 --- a/pkg/security/resolvers/file/pe.go +++ b/pkg/security/resolvers/file/pe.go @@ -9,7 +9,7 @@ import ( "bytes" "debug/pe" "encoding/binary" - "fmt" + "errors" "os" "strings" @@ -38,7 +38,7 @@ func determinePEArchitectureFromHeader(file *os.File, data []byte) (model.ABI, m } if !bytes.Equal(signature[0:4], []byte{0x50, 0x45, 0x00, 0x00}) { - return model.UnknownABI, model.UnknownArch, 
fmt.Errorf("invalid PE signature") + return model.UnknownABI, model.UnknownArch, errors.New("invalid PE signature") } magic := binary.LittleEndian.Uint16(signature[4:]) @@ -95,7 +95,7 @@ func readDLLName(file *os.File, nameOffset uint32) (string, error) { } } if end == 0 { - return "", fmt.Errorf("invalid DLL name") + return "", errors.New("invalid DLL name") } return strings.ToLower(string(nameData[:end])), nil diff --git a/pkg/security/resolvers/hash/resolver_linux.go b/pkg/security/resolvers/hash/resolver_linux.go index d3159140ba4d5e..4503c2a2eea142 100644 --- a/pkg/security/resolvers/hash/resolver_linux.go +++ b/pkg/security/resolvers/hash/resolver_linux.go @@ -37,7 +37,7 @@ import ( var ( // ErrSizeLimitReached indicates that the size limit was reached - ErrSizeLimitReached = fmt.Errorf("size limit reached") + ErrSizeLimitReached = errors.New("size limit reached") ) // SizeLimitedWriter implements io.Writer and returns an error if more than the configured amount of data is read diff --git a/pkg/security/resolvers/netns/resolver.go b/pkg/security/resolvers/netns/resolver.go index c7d9df14035037..b41e49ccef593a 100644 --- a/pkg/security/resolvers/netns/resolver.go +++ b/pkg/security/resolvers/netns/resolver.go @@ -11,6 +11,7 @@ package netns import ( "context" "encoding/json" + "errors" "fmt" "os" "strings" @@ -37,7 +38,7 @@ import ( var ( // ErrNoNetworkNamespaceHandle is used to indicate that we haven't resolved a handle for the requested network // namespace yet. 
- ErrNoNetworkNamespaceHandle = fmt.Errorf("no network namespace handle") + ErrNoNetworkNamespaceHandle = errors.New("no network namespace handle") // lonelyNamespaceTimeout is the timeout past which a lonely network namespace is expired lonelyNamespaceTimeout = 30 * time.Second diff --git a/pkg/security/resolvers/process/resolver_ebpf.go b/pkg/security/resolvers/process/resolver_ebpf.go index 967a1440c7ce3b..bc6eaf9e46d52d 100644 --- a/pkg/security/resolvers/process/resolver_ebpf.go +++ b/pkg/security/resolvers/process/resolver_ebpf.go @@ -360,7 +360,7 @@ func (p *EBPFResolver) ApplyExitEntry(event *model.Event, newEntryCb func(*model func (p *EBPFResolver) enrichEventFromProcfs(entry *model.ProcessCacheEntry, proc *process.Process, filledProc *utils.FilledProcess) error { // the provided process is a kernel process if its virtual memory size is null if filledProc.MemInfo.VMS == 0 { - return fmt.Errorf("cannot snapshot kernel threads") + return errors.New("cannot snapshot kernel threads") } pid := uint32(proc.Pid) diff --git a/pkg/security/resolvers/sbom/collectorv2/dpkg.go b/pkg/security/resolvers/sbom/collectorv2/dpkg.go index e81bee553ccb47..3af9569529ac6f 100644 --- a/pkg/security/resolvers/sbom/collectorv2/dpkg.go +++ b/pkg/security/resolvers/sbom/collectorv2/dpkg.go @@ -187,7 +187,7 @@ func (s *dpkgScanner) parseInfoFile(root *os.Root, path string) ([]string, error // so we cut on the first space and then trim the path _, installedPath, ok := strings.Cut(scanner.Text(), " ") if !ok { - return nil, fmt.Errorf("failed to parse installed file line, bad format") + return nil, errors.New("failed to parse installed file line, bad format") } installedPath = strings.TrimSpace(installedPath) installedFiles = append(installedFiles, "/"+installedPath) diff --git a/pkg/security/resolvers/securitydescriptors/resolver_windows.go b/pkg/security/resolvers/securitydescriptors/resolver_windows.go index 7245725b420206..c3d40c0acd066d 100644 --- 
a/pkg/security/resolvers/securitydescriptors/resolver_windows.go +++ b/pkg/security/resolvers/securitydescriptors/resolver_windows.go @@ -7,6 +7,7 @@ package securitydescriptors import ( + "errors" "fmt" "regexp" "strings" @@ -120,18 +121,18 @@ func (resolver *Resolver) GetHumanReadableSD(sddl string) (string, error) { re := regexp.MustCompile(`\(([^\)]+)\)`) matches := re.FindAllStringSubmatch(sddl, -1) if matches == nil { - return "", fmt.Errorf("no ACEs found in DACL") + return "", errors.New("no ACEs found in DACL") } builder.WriteString("DACL:\n") for _, match := range matches { if len(match) != 2 { - return "", fmt.Errorf("invalid ACE format") + return "", errors.New("invalid ACE format") } ace := match[1] fields := strings.Split(ace, ";") if len(fields) != 6 { - return "", fmt.Errorf("invalid ACE format") + return "", errors.New("invalid ACE format") } aceType := fields[0] diff --git a/pkg/security/resolvers/tags/resolver.go b/pkg/security/resolvers/tags/resolver.go index 8840bc30ff74c8..3695982ff24f06 100644 --- a/pkg/security/resolvers/tags/resolver.go +++ b/pkg/security/resolvers/tags/resolver.go @@ -8,6 +8,7 @@ package tags import ( "context" + "errors" "fmt" "github.com/DataDog/datadog-agent/comp/core/tagger/types" @@ -60,22 +61,22 @@ func (t *DefaultResolver) ResolveWithErr(id containerutils.WorkloadID) ([]string // resolveWorkloadTags resolves tags for a workload ID, handling both container and cgroup workloads func (t *DefaultResolver) resolveWorkloadTags(id containerutils.WorkloadID) ([]string, error) { if id == nil { - return nil, fmt.Errorf("nil workload id") + return nil, errors.New("nil workload id") } switch v := id.(type) { case containerutils.ContainerID: if len(v) == 0 { - return nil, fmt.Errorf("empty container id") + return nil, errors.New("empty container id") } // Resolve as a container ID return GetTagsOfContainer(t.tagger, v) case containerutils.CGroupID: if len(v) == 0 { - return nil, fmt.Errorf("empty cgroup id") + return nil, 
errors.New("empty cgroup id") } // CGroup resolution is only supported on Linux - return nil, fmt.Errorf("cgroup resolution not supported on this platform") + return nil, errors.New("cgroup resolution not supported on this platform") default: return nil, fmt.Errorf("unknown workload id type: %T", id) } diff --git a/pkg/security/resolvers/tags/resolver_linux.go b/pkg/security/resolvers/tags/resolver_linux.go index 24709d76ad6470..bbc201134e471f 100644 --- a/pkg/security/resolvers/tags/resolver_linux.go +++ b/pkg/security/resolvers/tags/resolver_linux.go @@ -8,6 +8,7 @@ package tags import ( "context" + "errors" "fmt" "path/filepath" "time" @@ -197,19 +198,19 @@ func (t *LinuxResolver) ResolveWithErr(id containerutils.WorkloadID) ([]string, // resolveWorkloadTags overrides the default implementation to handle CGroup resolution on Linux func (t *LinuxResolver) resolveWorkloadTags(id containerutils.WorkloadID) ([]string, error) { if id == nil { - return nil, fmt.Errorf("nil workload id") + return nil, errors.New("nil workload id") } switch v := id.(type) { case containerutils.ContainerID: if len(v) == 0 { - return nil, fmt.Errorf("empty container id") + return nil, errors.New("empty container id") } // Resolve as a container ID return GetTagsOfContainer(t.tagger, v) case containerutils.CGroupID: if len(v) == 0 { - return nil, fmt.Errorf("empty cgroup id") + return nil, errors.New("empty cgroup id") } // Generate systemd service tags for cgroup workloads tags := t.getCGroupTags(v) diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 80070be5a9f8ea..97e23eb3879629 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -261,8 +261,8 @@ func (e *RuleEngine) StartRunningMetrics(ctx context.Context) { return case <-heartbeatTicker.C: tags := []string{ - fmt.Sprintf("version:%s", version.AgentVersion), - fmt.Sprintf("os:%s", runtime.GOOS), + "version:" + version.AgentVersion, + "os:" + runtime.GOOS, 
constants.CardinalityTagPrefix + "none", } @@ -291,7 +291,7 @@ func (e *RuleEngine) StartRunningMetrics(ctx context.Context) { e.RLock() for _, version := range e.policiesVersions { - tags = append(tags, fmt.Sprintf("policies_version:%s", version)) + tags = append(tags, "policies_version:"+version) } e.RUnlock() diff --git a/pkg/security/secl/compiler/eval/errors.go b/pkg/security/secl/compiler/eval/errors.go index 1be678c4b9fad2..0ebe76543dbc21 100644 --- a/pkg/security/secl/compiler/eval/errors.go +++ b/pkg/security/secl/compiler/eval/errors.go @@ -109,7 +109,7 @@ func (e *ErrRuleParse) Error() string { column-- } - str := fmt.Sprintf("%s\n", e.expr) + str := e.expr + "\n" str += strings.Repeat(" ", column) str += "^" return str diff --git a/pkg/security/secl/compiler/eval/eval.go b/pkg/security/secl/compiler/eval/eval.go index 9c964c577096af..bdef0b83db50d1 100644 --- a/pkg/security/secl/compiler/eval/eval.go +++ b/pkg/security/secl/compiler/eval/eval.go @@ -296,14 +296,14 @@ func stringEvaluatorFromVariable(str string, pos lexer.Position, opts *Opts, sta case *IntArrayEvaluator: evaluators = append(evaluators, &StringEvaluator{ EvalFnc: func(ctx *Context) string { - var result string + var builder strings.Builder for i, number := range evaluator.EvalFnc(ctx) { if i != 0 { - result += "," + builder.WriteString(",") } - result += strconv.FormatInt(int64(number), 10) + builder.WriteString(strconv.FormatInt(int64(number), 10)) } - return result + return builder.String() }}) case *StringEvaluator: evaluators = append(evaluators, evaluator) @@ -344,15 +344,15 @@ func stringEvaluatorFromVariable(str string, pos lexer.Position, opts *Opts, sta Value: str, ValueType: VariableValueType, EvalFnc: func(ctx *Context) string { - var result string + var builder strings.Builder for _, evaluator := range evaluators { if evaluator.EvalFnc != nil { - result += evaluator.EvalFnc(ctx) + builder.WriteString(evaluator.EvalFnc(ctx)) } else { - result += evaluator.Value + 
builder.WriteString(evaluator.Value) } } - return result + return builder.String() }, }, pos, nil } diff --git a/pkg/security/secl/compiler/eval/eval_test.go b/pkg/security/secl/compiler/eval/eval_test.go index b930d2470dfc4a..d2f1aa09d34121 100644 --- a/pkg/security/secl/compiler/eval/eval_test.go +++ b/pkg/security/secl/compiler/eval/eval_test.go @@ -613,7 +613,7 @@ func TestConstants(t *testing.T) { if err == nil { var msg string if len(test.Message) > 0 { - msg = fmt.Sprintf(": reason: %s", test.Message) + msg = ": reason: " + test.Message } t.Fatalf("expected an error for `%s`%s", test.Expr, msg) } diff --git a/pkg/security/secl/compiler/eval/model_test.go b/pkg/security/secl/compiler/eval/model_test.go index 86132d8f706965..89d4362f7f41c6 100644 --- a/pkg/security/secl/compiler/eval/model_test.go +++ b/pkg/security/secl/compiler/eval/model_test.go @@ -8,6 +8,7 @@ package eval import ( "container/list" + "errors" "fmt" "net" "reflect" @@ -622,7 +623,7 @@ func (m *testModel) GetEvaluator(field Field, regID RegisterID, offset int) (Eva } else if b.Field == "event.title" { titleEvaluator, errTitleEvaluator = StringEquals(titleStringEvaluator, a, state) } else { - return nil, fmt.Errorf("at least one evaluator must be event.title") + return nil, errors.New("at least one evaluator must be event.title") } if errTitleEvaluator != nil { return nil, errTitleEvaluator @@ -653,7 +654,7 @@ func (m *testModel) GetEvaluator(field Field, regID RegisterID, offset int) (Eva } else if b.Field == "event.title" { upperEvaluator, errUpperEvaluator = StringEquals(upperCaseStringEvaluator, a, state) } else { - return nil, fmt.Errorf("at least one evaluator must be event.title") + return nil, errors.New("at least one evaluator must be event.title") } if errUpperEvaluator != nil { return nil, errUpperEvaluator diff --git a/pkg/security/secl/compiler/eval/strings.go b/pkg/security/secl/compiler/eval/strings.go index 611131a0cc1bf0..bf3fae4241f1f0 100644 --- 
a/pkg/security/secl/compiler/eval/strings.go +++ b/pkg/security/secl/compiler/eval/strings.go @@ -219,7 +219,7 @@ type PatternStringMatcher struct { func (p *PatternStringMatcher) Compile(pattern string, caseInsensitive bool) error { // ** are not allowed in normal patterns if strings.Contains(pattern, "**") { - return fmt.Errorf("`**` is not allowed in patterns") + return errors.New("`**` is not allowed in patterns") } p.pattern = newPatternElement(pattern) diff --git a/pkg/security/secl/model/consts_linux.go b/pkg/security/secl/model/consts_linux.go index 54e1f7acaa5b29..de96fd8b6a3e19 100644 --- a/pkg/security/secl/model/consts_linux.go +++ b/pkg/security/secl/model/consts_linux.go @@ -11,6 +11,7 @@ import ( "math" "math/bits" "sort" + "strconv" "strings" "syscall" @@ -1432,7 +1433,7 @@ func bitmaskToStringArray(bitmask int, intToStrMap map[int]string) []string { } if result != bitmask { - strs = append(strs, fmt.Sprintf("%d", bitmask&^result)) + strs = append(strs, strconv.Itoa(bitmask&^result)) } sort.Strings(strs) @@ -1459,7 +1460,7 @@ func bitmaskU64ToStringArray(bitmask uint64, intToStrMap map[uint64]string) []st } if result != bitmask { - strs = append(strs, fmt.Sprintf("%d", bitmask&^result)) + strs = append(strs, strconv.FormatUint(bitmask&^result, 10)) } sort.Strings(strs) diff --git a/pkg/security/secl/model/model_test.go b/pkg/security/secl/model/model_test.go index 44a7f254ddd4d3..36b542ea791c90 100644 --- a/pkg/security/secl/model/model_test.go +++ b/pkg/security/secl/model/model_test.go @@ -21,15 +21,17 @@ import ( func TestPathValidation(t *testing.T) { mod := &Model{} - var maxDepthPath string + var maxDepthPathBuilder strings.Builder for i := 0; i <= MaxPathDepth; i++ { - maxDepthPath += "a/" + maxDepthPathBuilder.WriteString("a/") } + maxDepthPath := maxDepthPathBuilder.String() - var maxSegmentPath string + var maxSegmentPathBuilder strings.Builder for i := 0; i <= MaxSegmentLength; i++ { - maxSegmentPath += "a" + 
maxSegmentPathBuilder.WriteString("a") } + maxSegmentPath := maxSegmentPathBuilder.String() tests := []struct { val string diff --git a/pkg/security/secl/rules/filter/seclrulefilter.go b/pkg/security/secl/rules/filter/seclrulefilter.go index b4e182a732cbdd..4b6cf6c4277135 100644 --- a/pkg/security/secl/rules/filter/seclrulefilter.go +++ b/pkg/security/secl/rules/filter/seclrulefilter.go @@ -8,6 +8,7 @@ package filter import ( "runtime" + "strings" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" @@ -28,14 +29,16 @@ func NewSECLRuleFilter(model eval.Model) *SECLRuleFilter { } func mergeFilterExpressions(filters []string) string { - var expression string + var builder strings.Builder for i, filter := range filters { if i != 0 { - expression += " || " + builder.WriteString(" || ") } - expression += "(" + filter + ")" + builder.WriteString("(") + builder.WriteString(filter) + builder.WriteString(")") } - return expression + return builder.String() } func (r *SECLRuleFilter) newEvalContext() eval.Context { diff --git a/pkg/security/secl/rules/fim_unix.go b/pkg/security/secl/rules/fim_unix.go index 805e8834c3858c..2b5c50b53bb64b 100644 --- a/pkg/security/secl/rules/fim_unix.go +++ b/pkg/security/secl/rules/fim_unix.go @@ -30,7 +30,7 @@ func expandFim(baseID, groupID, baseExpr string) []expandedRule { var expandedRules []expandedRule for _, eventType := range []string{"open", "chmod", "chown", "link", "rename", "unlink", "utimes"} { - expr := strings.ReplaceAll(baseExpr, "fim.write.file.", fmt.Sprintf("%s.file.", eventType)) + expr := strings.ReplaceAll(baseExpr, "fim.write.file.", eventType+".file.") if eventType == "open" { expr = fmt.Sprintf("(%s) && open.flags & (O_CREAT|O_TRUNC|O_APPEND|O_RDWR|O_WRONLY) > 0", expr) } diff --git a/pkg/security/secl/rules/model.go b/pkg/security/secl/rules/model.go index eef58289af6e2d..e0cefd73d4d637 100644 --- a/pkg/security/secl/rules/model.go +++ 
b/pkg/security/secl/rules/model.go @@ -260,11 +260,11 @@ func (s *SetDefinition) PreCheck(_ PolicyLoaderOpts) error { } if s.Inherited && s.Scope != "process" { - return fmt.Errorf("only variables scoped to process can be marked as inherited") + return errors.New("only variables scoped to process can be marked as inherited") } if len(s.ScopeField) > 0 && s.Scope != "process" { - return fmt.Errorf("only variables scoped to process can have a custom scope_field") + return errors.New("only variables scoped to process can have a custom scope_field") } return nil diff --git a/pkg/security/secl/rules/policy_loader_test.go b/pkg/security/secl/rules/policy_loader_test.go index a55d3c9291767e..493f8174a7d4e0 100644 --- a/pkg/security/secl/rules/policy_loader_test.go +++ b/pkg/security/secl/rules/policy_loader_test.go @@ -7,7 +7,7 @@ package rules import ( - "fmt" + "errors" "testing" "github.com/google/go-cmp/cmp" @@ -413,7 +413,7 @@ func TestPolicyLoader_LoadPolicies(t *testing.T) { dummyLoadPoliciesFunc: func() ([]*Policy, *multierror.Error) { var errs *multierror.Error - errs = multierror.Append(errs, &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`yaml: unmarshal error`)}) + errs = multierror.Append(errs, &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: errors.New(`yaml: unmarshal error`)}) return nil, errs }, }, @@ -468,7 +468,7 @@ func TestPolicyLoader_LoadPolicies(t *testing.T) { }, wantErr: func(t assert.TestingT, err *multierror.Error, _ ...interface{}) bool { return assert.Equal(t, err, &multierror.Error{Errors: []error{ - &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`yaml: unmarshal error`)}, + &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: errors.New(`yaml: unmarshal error`)}, }}, "Expected no errors but got %+v", err) }, }, @@ -504,7 +504,7 @@ func TestPolicyLoader_LoadPolicies(t *testing.T) { dummyLoadPoliciesFunc: func() ([]*Policy, 
*multierror.Error) { var errs *multierror.Error - errs = multierror.Append(errs, &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`EOF`)}) + errs = multierror.Append(errs, &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: errors.New(`EOF`)}) return nil, errs }, }, @@ -557,7 +557,7 @@ func TestPolicyLoader_LoadPolicies(t *testing.T) { wantErr: func(t assert.TestingT, err *multierror.Error, _ ...interface{}) bool { return assert.Equal(t, err, &multierror.Error{ Errors: []error{ - &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`EOF`)}, + &ErrPolicyLoad{Name: "myRC.policy", Source: PolicyProviderTypeRC, Err: errors.New(`EOF`)}, }}) }, }, diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go index 5436099eff6bfa..ab3c8087292c24 100644 --- a/pkg/security/secl/rules/policy_test.go +++ b/pkg/security/secl/rules/policy_test.go @@ -9,6 +9,7 @@ package rules import ( + "errors" "fmt" "net" "net/http" @@ -2443,7 +2444,7 @@ func TestLoadPolicy(t *testing.T) { }, want: nil, wantErr: func(t assert.TestingT, err error, _ ...interface{}) bool { - return assert.Error(t, err, &ErrPolicyLoad{Name: "myLocal.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`EOF`)}) + return assert.Error(t, err, &ErrPolicyLoad{Name: "myLocal.policy", Source: PolicyProviderTypeRC, Err: errors.New(`EOF`)}) }, }, { @@ -2459,7 +2460,7 @@ func TestLoadPolicy(t *testing.T) { }, want: nil, wantErr: func(t assert.TestingT, err error, _ ...interface{}) bool { - return assert.Error(t, err, &ErrPolicyLoad{Name: "myLocal.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`EOF`)}) + return assert.Error(t, err, &ErrPolicyLoad{Name: "myLocal.policy", Source: PolicyProviderTypeRC, Err: errors.New(`EOF`)}) }, }, { @@ -2497,7 +2498,7 @@ broken }, want: nil, wantErr: func(t assert.TestingT, err error, _ ...interface{}) bool { - return assert.ErrorContains(t, err, (&ErrPolicyLoad{Name: 
"myLocal.policy", Source: PolicyProviderTypeRC, Err: fmt.Errorf(`yaml: unmarshal error`)}).Error()) + return assert.ErrorContains(t, err, (&ErrPolicyLoad{Name: "myLocal.policy", Source: PolicyProviderTypeRC, Err: errors.New(`yaml: unmarshal error`)}).Error()) }, }, { diff --git a/pkg/security/security_profile/activity_tree/activity_tree.go b/pkg/security/security_profile/activity_tree/activity_tree.go index 2b717ad74d3a06..83ba33b5f6b3e1 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree.go +++ b/pkg/security/security_profile/activity_tree/activity_tree.go @@ -331,11 +331,11 @@ func (at *ActivityTree) isEventValid(event *model.Event, dryRun bool) (bool, err case model.IMDSEventType: // ignore IMDS answers without AccessKeyIDS if event.IMDS.Type == model.IMDSResponseType && len(event.IMDS.AWS.SecurityCredentials.AccessKeyID) == 0 { - return false, fmt.Errorf("untraced event: IMDS response without credentials") + return false, errors.New("untraced event: IMDS response without credentials") } // ignore IMDS requests without URLs if event.IMDS.Type == model.IMDSRequestType && len(event.IMDS.URL) == 0 { - return false, fmt.Errorf("invalid event: IMDS request without any URL") + return false, errors.New("invalid event: IMDS request without any URL") } } return true, nil diff --git a/pkg/security/security_profile/activity_tree/activity_tree_graph.go b/pkg/security/security_profile/activity_tree/activity_tree_graph.go index 84242070f049bc..cfeba332fc35c8 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_graph.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_graph.go @@ -56,28 +56,29 @@ var ( ) func (at *ActivityTree) getGraphTitle(name string, selector string) string { - title := tableHeader - title += "Name" + name + "" + var builder strings.Builder + builder.WriteString(tableHeader) + builder.WriteString("Name" + name + "") for i, t := range strings.Split(selector, ",") { if i%3 == 0 { if i != 0 { - title += "" + 
builder.WriteString("") } - title += "" + builder.WriteString("") if i == 0 { - title += "Selector" + builder.WriteString("Selector") } else { - title += "" + builder.WriteString("") } - title += "" + builder.WriteString("") } else { - title += ", " + builder.WriteString(", ") } - title += t + builder.WriteString(t) } - title += "" - title += ">" - return title + builder.WriteString("") + builder.WriteString(">") + return builder.String() } // PrepareGraphData returns a graph from the activity tree @@ -269,11 +270,14 @@ func (at *ActivityTree) prepareDNSNode(n *DNSNode, data *utils.Graph, processID // save guard, this should never happen return utils.GraphID{}, false } - name := n.Requests[0].Question.Name + " (" + (model.QType(n.Requests[0].Question.Type).String()) + var nameBuilder strings.Builder + nameBuilder.WriteString(n.Requests[0].Question.Name + " (" + (model.QType(n.Requests[0].Question.Type).String())) for _, req := range n.Requests[1:] { - name += ", " + model.QType(req.Question.Type).String() + nameBuilder.WriteString(", ") + nameBuilder.WriteString(model.QType(req.Question.Type).String()) } - name += ")" + nameBuilder.WriteString(")") + name := nameBuilder.String() dnsNode := &utils.Node{ ID: processID.Derive(utils.NewNodeIDFromPtr(n)), @@ -309,7 +313,7 @@ func (at *ActivityTree) prepareIMDSNode(n *IMDSNode, data *utils.Graph, processI label += "Host" + n.Event.Host + "" } if n.Event.CloudProvider == model.IMDSAWSCloudProvider { - label += "IMDSv2" + fmt.Sprintf("%v", n.Event.AWS.IsIMDSv2) + "" + label += "IMDSv2" + strconv.FormatBool(n.Event.AWS.IsIMDSv2) + "" if len(n.Event.AWS.SecurityCredentials.AccessKeyID) > 0 { label += " AccessKeyID " + n.Event.AWS.SecurityCredentials.AccessKeyID + "" } @@ -469,24 +473,25 @@ func (at *ActivityTree) prepareFileNode(f *FileNode, data *utils.SubGraph, proce } func (at *ActivityTree) prepareSyscallsNode(p *ProcessNode, data *utils.SubGraph) utils.GraphID { - label := tableHeader + var labelBuilder strings.Builder 
+ labelBuilder.WriteString(tableHeader) for i, s := range p.Syscalls { if i%5 == 0 { if i != 0 { - label += "" + labelBuilder.WriteString("") } - label += "" + labelBuilder.WriteString("") } else { - label += ", " + labelBuilder.WriteString(", ") } - label += model.Syscall(s.Syscall).String() + labelBuilder.WriteString(model.Syscall(s.Syscall).String()) } - label += "" - label += ">" + labelBuilder.WriteString("") + labelBuilder.WriteString(">") syscallsNode := &utils.Node{ ID: utils.NewGraphIDWithDescription("syscalls", utils.NewNodeIDFromPtr(p)), - Label: label, + Label: labelBuilder.String(), Size: smallText, Color: processCategoryColor, FillColor: processCategorySnapshotColor, @@ -499,18 +504,19 @@ func (at *ActivityTree) prepareSyscallsNode(p *ProcessNode, data *utils.SubGraph } func (at *ActivityTree) prepareCapabilitiesNode(p *ProcessNode, data *utils.SubGraph) utils.GraphID { - label := tableHeader + var labelBuilder strings.Builder + labelBuilder.WriteString(tableHeader) for _, capabilityNode := range p.Capabilities { kernelCap := model.KernelCapability(1 << capabilityNode.Capability) - label += "" + kernelCap.String() + "" + strconv.FormatBool(capabilityNode.Capable) + "" + labelBuilder.WriteString("" + kernelCap.String() + "" + strconv.FormatBool(capabilityNode.Capable) + "") } - label += ">" + labelBuilder.WriteString(">") capNode := &utils.Node{ ID: utils.NewGraphIDWithDescription("capabilities", utils.NewNodeIDFromPtr(p)), - Label: label, + Label: labelBuilder.String(), Size: smallText, Color: processCategoryColor, FillColor: processCategorySnapshotColor, diff --git a/pkg/security/security_profile/activity_tree/activity_tree_stats.go b/pkg/security/security_profile/activity_tree/activity_tree_stats.go index a73f8eb709dc76..b9f6427c2c42cc 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_stats.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_stats.go @@ -83,7 +83,7 @@ func (stats *Stats) ApproximateSize() int64 { 
// SendStats sends metrics to Datadog func (stats *Stats) SendStats(client statsd.ClientInterface, treeType string) error { - treeTypeTag := fmt.Sprintf("tree_type:%s", treeType) + treeTypeTag := "tree_type:" + treeType for evtType, count := range stats.counts { evtTypeTag := fmt.Sprintf("event_type:%s", evtType) diff --git a/pkg/security/security_profile/activity_tree/file_node.go b/pkg/security/security_profile/activity_tree/file_node.go index 3c49d8c48928e1..7b917f009f9b0d 100644 --- a/pkg/security/security_profile/activity_tree/file_node.go +++ b/pkg/security/security_profile/activity_tree/file_node.go @@ -68,24 +68,24 @@ func NewFileNode(fileEvent *model.FileEvent, event *model.Event, name string, im } func (fn *FileNode) getNodeLabel(prefix string) string { - var label string + var builder strings.Builder if prefix == "" { - label += tableHeader - label += "" - label += "Events" - label += "Hash count" - label += "File" - label += "Package" - label += "" - } - label += fn.buildNodeRow(prefix) + builder.WriteString(tableHeader) + builder.WriteString("") + builder.WriteString("Events") + builder.WriteString("Hash count") + builder.WriteString("File") + builder.WriteString("Package") + builder.WriteString("") + } + builder.WriteString(fn.buildNodeRow(prefix)) for _, child := range fn.Children { - label += child.getNodeLabel(prefix + "/" + fn.Name) + builder.WriteString(child.getNodeLabel(prefix + "/" + fn.Name)) } if prefix == "" { - label += ">" + builder.WriteString(">") } - return label + return builder.String() } func (fn *FileNode) buildNodeRow(prefix string) string { diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index 717392e6b039da..06da9cb18be103 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -14,6 +14,7 @@ import ( "io" "sort" "strconv" + "strings" "time" 
"golang.org/x/sys/unix" @@ -100,9 +101,10 @@ func (pn *ProcessNode) AppendChild(node *ProcessNode) { } func (pn *ProcessNode) getNodeLabel(args string) string { - label := tableHeader + var builder strings.Builder + builder.WriteString(tableHeader) - label += "Command" + builder.WriteString("Command") var cmd string if sprocess.IsBusybox(pn.Process.FileEvent.PathnameStr) { arg0, _ := sprocess.GetProcessArgv0(&pn.Process) @@ -113,23 +115,23 @@ func (pn *ProcessNode) getNodeLabel(args string) string { if len(cmd) > 100 { cmd = cmd[:100] + " ..." } - label += html.EscapeString(cmd) - label += "" + builder.WriteString(html.EscapeString(cmd)) + builder.WriteString("") if len(pn.Process.FileEvent.PkgName) != 0 { - label += "Package" + fmt.Sprintf("%s:%s", pn.Process.FileEvent.PkgName, pn.Process.FileEvent.PkgVersion) + "" + builder.WriteString("Package" + fmt.Sprintf("%s:%s", pn.Process.FileEvent.PkgName, pn.Process.FileEvent.PkgVersion) + "") } // add hashes if len(pn.Process.FileEvent.Hashes) > 0 { - label += "Hashes" + pn.Process.FileEvent.Hashes[0] + "" + builder.WriteString("Hashes" + pn.Process.FileEvent.Hashes[0] + "") for _, h := range pn.Process.FileEvent.Hashes { - label += "" + h + "" + builder.WriteString("" + h + "") } } else { - label += "Hash state" + pn.Process.FileEvent.HashState.String() + "" + builder.WriteString("Hash state" + pn.Process.FileEvent.HashState.String() + "") } - label += ">" - return label + builder.WriteString(">") + return builder.String() } // nolint: unused @@ -146,7 +148,7 @@ func (pn *ProcessNode) debug(w io.Writer, prefix string) { }) for _, f := range sortedFiles { - f.debug(w, fmt.Sprintf("%s -", prefix)) + f.debug(w, prefix+" -") } } if len(pn.DNSNames) > 0 { diff --git a/pkg/security/security_profile/ad.go b/pkg/security/security_profile/ad.go index e920bd52a0d118..d17121d98edc4e 100644 --- a/pkg/security/security_profile/ad.go +++ b/pkg/security/security_profile/ad.go @@ -544,7 +544,7 @@ func (m *Manager) 
startDumpWithConfig(containerID containerutils.ContainerID, cg LinuxDistribution: m.kernelVersion.OsRelease["PRETTY_NAME"], Arch: utils.RuntimeArch(), - Name: fmt.Sprintf("activity-dump-%s", utils.RandString(10)), + Name: "activity-dump-" + utils.RandString(10), ProtobufVersion: profile.ProtobufVersion, DifferentiateArgs: m.config.RuntimeSecurity.ActivityDumpCgroupDifferentiateArgs, ContainerID: containerID, diff --git a/pkg/security/security_profile/grpc.go b/pkg/security/security_profile/grpc.go index b26e043986ea7f..17ab2e772fe3b3 100644 --- a/pkg/security/security_profile/grpc.go +++ b/pkg/security/security_profile/grpc.go @@ -92,7 +92,7 @@ func (m *Manager) DumpActivity(params *api.ActivityDumpParams) (*api.ActivityDum LinuxDistribution: m.kernelVersion.OsRelease["PRETTY_NAME"], Arch: utils.RuntimeArch(), - Name: fmt.Sprintf("activity-dump-%s", utils.RandString(10)), + Name: "activity-dump-" + utils.RandString(10), ProtobufVersion: profile.ProtobufVersion, DifferentiateArgs: params.GetDifferentiateArgs(), ContainerID: containerutils.ContainerID(params.GetContainerID()), @@ -290,7 +290,7 @@ func (m *Manager) SaveSecurityProfile(params *api.SecurityProfileSaveParams) (*a } // write profile to encoded profile to disk - f, err := os.CreateTemp("/tmp", fmt.Sprintf("%s-*.profile", p.Metadata.Name)) + f, err := os.CreateTemp("/tmp", p.Metadata.Name+"-*.profile") if err != nil { return nil, fmt.Errorf("couldn't create temporary file: %w", err) } diff --git a/pkg/security/security_profile/load_controller.go b/pkg/security/security_profile/load_controller.go index afa93dbaea6d52..13e45dea7b03d2 100644 --- a/pkg/security/security_profile/load_controller.go +++ b/pkg/security/security_profile/load_controller.go @@ -113,7 +113,7 @@ func (m *Manager) nextPartialDump(prev *dump.ActivityDump) *dump.ActivityDump { newDump := dump.NewActivityDump(m.pathsReducer, prev.Profile.Metadata.DifferentiateArgs, 0, m.config.RuntimeSecurity.ActivityDumpTracedEventTypes, m.updateTracedPid, 
newLoadConfig, func(ad *dump.ActivityDump) { ad.Profile.Header = prev.Profile.Header ad.Profile.Metadata = prev.Profile.Metadata - ad.Profile.Metadata.Name = fmt.Sprintf("activity-dump-%s", utils.RandString(10)) + ad.Profile.Metadata.Name = "activity-dump-" + utils.RandString(10) ad.Profile.Metadata.Start = now ad.Profile.Metadata.End = now.Add(newTimeout) ad.Profile.AddTags(prev.Profile.GetTags()) diff --git a/pkg/security/security_profile/manager.go b/pkg/security/security_profile/manager.go index 20349e05c4240d..9f063963383ea7 100644 --- a/pkg/security/security_profile/manager.go +++ b/pkg/security/security_profile/manager.go @@ -263,7 +263,7 @@ func NewManager(cfg *config.Config, statsdClient statsd.ClientInterface, ebpf *e } // add source tag if len(utils.GetTagValue("source", contextTags)) == 0 { - contextTags = append(contextTags, fmt.Sprintf("source:%s", ActivityDumpSource)) + contextTags = append(contextTags, "source:"+ActivityDumpSource) } containerFilters, err := utils.NewContainerFilter() diff --git a/pkg/security/security_profile/manager_test.go b/pkg/security/security_profile/manager_test.go index 65c81adf6cb255..01cd1b07d69d63 100644 --- a/pkg/security/security_profile/manager_test.go +++ b/pkg/security/security_profile/manager_test.go @@ -10,8 +10,8 @@ package securityprofile import ( "errors" - "fmt" "math/rand" + "strconv" "testing" "time" "unsafe" @@ -1819,9 +1819,9 @@ func TestSecurityProfileManager_tryAutolearn(t *testing.T) { baseDNSReq := ti.eventDNSReq for currentIncrement < ti.loopUntil { if ti.eventType == model.ExecEventType { - ti.eventProcessPath = basePath + fmt.Sprintf("%d", rand.Int()) + ti.eventProcessPath = basePath + strconv.Itoa(rand.Int()) } else if ti.eventType == model.DNSEventType { - ti.eventDNSReq = fmt.Sprintf("%d", rand.Int()) + baseDNSReq + ti.eventDNSReq = strconv.Itoa(rand.Int()) + baseDNSReq } ti.eventTimestampRaw = currentIncrement event := craftFakeEvent(t0, &ti, defaultContainerID) diff --git 
a/pkg/security/security_profile/profile/grpc.go b/pkg/security/security_profile/profile/grpc.go index 6602a26c1335f0..5623019ca30182 100644 --- a/pkg/security/security_profile/profile/grpc.go +++ b/pkg/security/security_profile/profile/grpc.go @@ -9,6 +9,7 @@ package profile import ( + "errors" "fmt" "path/filepath" "time" @@ -76,7 +77,7 @@ func (p *Profile) ToSecurityActivityDumpMessage(timeout time.Duration, storageRe func NewProfileFromActivityDumpMessage(msg *api.ActivityDumpMessage) (*Profile, map[config.StorageFormat][]config.StorageRequest, error) { metadata := msg.GetMetadata() if metadata == nil { - return nil, nil, fmt.Errorf("couldn't create new Profile: missing activity dump metadata") + return nil, nil, errors.New("couldn't create new Profile: missing activity dump metadata") } startTime, err := time.Parse(time.RFC822, metadata.GetStart()) diff --git a/pkg/security/security_profile/profile/profile.go b/pkg/security/security_profile/profile/profile.go index 1871662c9ad5c3..e58f625e0467c9 100644 --- a/pkg/security/security_profile/profile/profile.go +++ b/pkg/security/security_profile/profile/profile.go @@ -17,6 +17,7 @@ import ( "os" "path/filepath" "slices" + "strings" "sync" "time" @@ -704,11 +705,12 @@ func (p *Profile) ListAllVersionStates() { fmt.Printf("### Profile: %+v\n", p.GetSelectorStr()) orderedVersions := p.getTimeOrderedVersionContexts() - versions := "" + var versionsBuilder strings.Builder for version := range p.versionContexts { - versions += version + " " + versionsBuilder.WriteString(version) + versionsBuilder.WriteString(" ") } - fmt.Printf("Versions: %s\n", versions) + fmt.Printf("Versions: %s\n", versionsBuilder.String()) fmt.Printf("Global state: %s\n", p.getGlobalState().String()) for i, version := range orderedVersions { diff --git a/pkg/security/security_profile/storage/backend/forwarder.go b/pkg/security/security_profile/storage/backend/forwarder.go index 2a97045fa1308b..defb6f1beb8643 100644 --- 
a/pkg/security/security_profile/storage/backend/forwarder.go +++ b/pkg/security/security_profile/storage/backend/forwarder.go @@ -156,7 +156,7 @@ func (backend *ActivityDumpRemoteBackend) HandleActivityDump(imageName string, i // SendTelemetry sends telemetry for the current storage func (backend *ActivityDumpRemoteBackend) SendTelemetry(sender statsd.ClientInterface) { // send too large entity metric - tags := []string{fmt.Sprintf("format:%s", protobufFormat), fmt.Sprintf("compression:%v", true)} + tags := []string{"format:" + protobufFormat, fmt.Sprintf("compression:%v", true)} _ = sender.Count(metrics.MetricActivityDumpEntityTooLarge, int64(backend.tooLargeEntities.Load()), tags, 1.0) } diff --git a/pkg/security/security_profile/storage/directory.go b/pkg/security/security_profile/storage/directory.go index 6cbe1dff89a4dc..02be4a973972e7 100644 --- a/pkg/security/security_profile/storage/directory.go +++ b/pkg/security/security_profile/storage/directory.go @@ -11,6 +11,7 @@ package storage import ( "bytes" "compress/gzip" + "errors" "fmt" "io/fs" "os" @@ -211,11 +212,11 @@ func (d *Directory) Persist(request config.StorageRequest, p *profile.Profile, r // Load loads the profile for the provided selector if it exists func (d *Directory) Load(wls *cgroupModel.WorkloadSelector, p *profile.Profile) (bool, error) { if wls == nil { - return false, fmt.Errorf("no selector was provided") + return false, errors.New("no selector was provided") } if p == nil { - return false, fmt.Errorf("no profile was provided") + return false, errors.New("no profile was provided") } d.profilesLock.RLock() diff --git a/pkg/security/security_profile/storage/remote_storage_forwarder.go b/pkg/security/security_profile/storage/remote_storage_forwarder.go index fa0baf3f65da27..bf7be9b294af24 100644 --- a/pkg/security/security_profile/storage/remote_storage_forwarder.go +++ b/pkg/security/security_profile/storage/remote_storage_forwarder.go @@ -11,7 +11,7 @@ package storage import ( "bytes" 
"encoding/json" - "fmt" + "errors" "strings" "github.com/DataDog/datadog-go/v5/statsd" @@ -50,7 +50,7 @@ func (storage *ActivityDumpRemoteStorageForwarder) Persist(request config.Storag // marshal event metadata headerData, err := json.Marshal(p.Header) if err != nil { - return fmt.Errorf("couldn't marshall event metadata") + return errors.New("couldn't marshall event metadata") } if storage.activityDumpHandler == nil { diff --git a/pkg/security/serializers/serializers_linux.go b/pkg/security/serializers/serializers_linux.go index d8f8bd95b425aa..da6e129e92e172 100644 --- a/pkg/security/serializers/serializers_linux.go +++ b/pkg/security/serializers/serializers_linux.go @@ -13,6 +13,7 @@ package serializers import ( "fmt" "path" + "strconv" "syscall" "time" @@ -1005,7 +1006,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer func serializeK8sContext(e *model.Event, ctx *model.UserSessionContext, userSessionContextSerializer *UserSessionContextSerializer) { e.FieldHandlers.ResolveK8SUserSessionContext(e, &ctx.K8SSessionContext) - userSessionContextSerializer.K8SSessionID = fmt.Sprintf("%x", ctx.K8SSessionID) + userSessionContextSerializer.K8SSessionID = strconv.FormatUint(uint64(ctx.K8SSessionID), 16) userSessionContextSerializer.K8SUsername = ctx.K8SUsername userSessionContextSerializer.K8SUID = ctx.K8SUID userSessionContextSerializer.K8SGroups = ctx.K8SGroups @@ -1028,7 +1029,7 @@ func serializeSSHContext(ctx *model.UserSessionContext, userSessionContextSerial sshAuthMethod = "" } - userSessionContextSerializer.SSHSessionID = fmt.Sprintf("%x", ctx.SSHSessionID) + userSessionContextSerializer.SSHSessionID = strconv.FormatUint(uint64(ctx.SSHSessionID), 16) userSessionContextSerializer.SSHClientPort = ctx.SSHClientPort userSessionContextSerializer.SSHClientIP = sshClientIP userSessionContextSerializer.SSHAuthMethod = sshAuthMethod @@ -1417,7 +1418,7 @@ type DDContextSerializer struct { func newDDContextSerializer(e *model.Event) 
*DDContextSerializer { s := &DDContextSerializer{} if e.SpanContext.SpanID != 0 && (e.SpanContext.TraceID.Hi != 0 || e.SpanContext.TraceID.Lo != 0) { - s.SpanID = fmt.Sprint(e.SpanContext.SpanID) + s.SpanID = strconv.FormatUint(e.SpanContext.SpanID, 10) s.TraceID = fmt.Sprintf("%x%x", e.SpanContext.TraceID.Hi, e.SpanContext.TraceID.Lo) return s } @@ -1430,7 +1431,7 @@ func newDDContextSerializer(e *model.Event) *DDContextSerializer { pce := (*model.ProcessCacheEntry)(ptr) if pce.SpanID != 0 && (pce.TraceID.Hi != 0 || pce.TraceID.Lo != 0) { - s.SpanID = fmt.Sprint(pce.SpanID) + s.SpanID = strconv.FormatUint(pce.SpanID, 10) s.TraceID = fmt.Sprintf("%x%x", pce.TraceID.Hi, pce.TraceID.Lo) break } @@ -1487,7 +1488,7 @@ func newSetSockOptEventSerializer(e *model.Event) *SetSockOptEventSerializer { case syscall.SOL_IPV6: SetSockOptEventSerializer.OptName = model.SetSockOptOptNameIPv6(e.SetSockOpt.OptName).String() default: - SetSockOptEventSerializer.OptName = fmt.Sprintf("%d", e.SetSockOpt.OptName) + SetSockOptEventSerializer.OptName = strconv.FormatUint(uint64(e.SetSockOpt.OptName), 10) } return &SetSockOptEventSerializer } diff --git a/pkg/security/tests/activity_dumps_common.go b/pkg/security/tests/activity_dumps_common.go index fd7489047e8c9f..535fe9a6332f48 100644 --- a/pkg/security/tests/activity_dumps_common.go +++ b/pkg/security/tests/activity_dumps_common.go @@ -9,7 +9,6 @@ package tests import ( - "fmt" "os" "path/filepath" "testing" @@ -35,7 +34,7 @@ func validateActivityDumpOutputs(t *testing.T, test *testModule, expectedFormats securityProfileValidator func(sp *profile.Profile) bool) { perExtOK := make(map[string]bool) for _, format := range expectedFormats { - ext := fmt.Sprintf(".%s", format) + ext := "." 
+ format perExtOK[ext] = false } diff --git a/pkg/security/tests/activity_dumps_loadcontroller_test.go b/pkg/security/tests/activity_dumps_loadcontroller_test.go index 8324bf045133a7..8f4be90e80384d 100644 --- a/pkg/security/tests/activity_dumps_loadcontroller_test.go +++ b/pkg/security/tests/activity_dumps_loadcontroller_test.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "testing" "time" @@ -159,13 +160,14 @@ func TestActivityDumpsLoadControllerEventTypes(t *testing.T) { }() for activeEventTypes := securityprofile.TracedEventTypesReductionOrder; ; activeEventTypes = activeEventTypes[1:] { - testName := "" + var testNameBuilder strings.Builder for i, activeEventType := range activeEventTypes { if i > 0 { - testName += "-" + testNameBuilder.WriteString("-") } - testName += activeEventType.String() + testNameBuilder.WriteString(activeEventType.String()) } + testName := testNameBuilder.String() if testName == "" { testName = "none" } diff --git a/pkg/security/tests/activity_dumps_test.go b/pkg/security/tests/activity_dumps_test.go index 884f71df30a66c..c82a5c0f518dd1 100644 --- a/pkg/security/tests/activity_dumps_test.go +++ b/pkg/security/tests/activity_dumps_test.go @@ -9,9 +9,9 @@ package tests import ( - "fmt" "os" "path/filepath" + "strconv" "strings" "testing" "time" @@ -454,7 +454,7 @@ func TestActivityDumps(t *testing.T) { } var files []string for i := 0; i < testActivityDumpRateLimiter*10; i++ { - files = append(files, filepath.Join(testDir, "ad-test-create-"+fmt.Sprintf("%d", i))) + files = append(files, filepath.Join(testDir, "ad-test-create-"+strconv.Itoa(i))) } args := []string{"sleep", "2", ";", "open"} args = append(args, files...) 
diff --git a/pkg/security/tests/cgroup_test.go b/pkg/security/tests/cgroup_test.go index 1829f1e895b6f7..1ca518976df212 100644 --- a/pkg/security/tests/cgroup_test.go +++ b/pkg/security/tests/cgroup_test.go @@ -9,7 +9,6 @@ package tests import ( - "fmt" "os" "os/exec" "slices" @@ -164,9 +163,9 @@ func TestCGroup(t *testing.T) { }) test.WaitSignal(t, func() error { - serviceUnit := fmt.Sprintf(`[Service] + serviceUnit := `[Service] Type=oneshot -ExecStart=/usr/bin/touch %s`, testFile2) +ExecStart=/usr/bin/touch ` + testFile2 if err := os.WriteFile("/etc/systemd/system/cws-test.service", []byte(serviceUnit), 0700); err != nil { return err } diff --git a/pkg/security/tests/cmdwrapper.go b/pkg/security/tests/cmdwrapper.go index 91a3acd3bcbb6c..ab1f59675f5967 100644 --- a/pkg/security/tests/cmdwrapper.go +++ b/pkg/security/tests/cmdwrapper.go @@ -113,7 +113,7 @@ func (d *dockerCmdWrapper) CommandContext(ctx context.Context, bin string, args } func (d *dockerCmdWrapper) start() ([]byte, error) { - d.containerName = fmt.Sprintf("docker-wrapper-%s", utils.RandString(6)) + d.containerName = "docker-wrapper-" + utils.RandString(6) cmd := exec.Command(d.executable, "run", "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined", "--rm", "--cap-add", "NET_ADMIN", "-d", "--name", d.containerName, "-v", d.mountSrc+":"+d.mountDest, d.image, "sleep", "1200") out, err := cmd.CombinedOutput() if err != nil { diff --git a/pkg/security/tests/connect_test.go b/pkg/security/tests/connect_test.go index 4423f91333dc75..8dbaf741c684f0 100644 --- a/pkg/security/tests/connect_test.go +++ b/pkg/security/tests/connect_test.go @@ -130,7 +130,7 @@ func TestConnectEventAFInetIOUring(t *testing.T) { select { case result = <-ch: case <-time.After(8 * time.Second): - return fmt.Errorf("timeout waiting for io_uring connect") + return errors.New("timeout waiting for io_uring connect") } ret, err := result.ReturnInt() if err != nil { diff --git a/pkg/security/tests/dentry_test.go 
b/pkg/security/tests/dentry_test.go index 3ad07e6e299bac..037b58eda80440 100644 --- a/pkg/security/tests/dentry_test.go +++ b/pkg/security/tests/dentry_test.go @@ -11,6 +11,7 @@ package tests import ( "os" "path" + "strings" "syscall" "testing" @@ -28,10 +29,7 @@ func TestDentryPathERPC(t *testing.T) { SkipIfNotAvailable(t) // generate a basename up to the current limit of the agent - var basename string - for i := 0; i < model.MaxSegmentLength; i++ { - basename += "a" - } + basename := strings.Repeat("a", model.MaxSegmentLength) rule := &rules.RuleDefinition{ ID: "test_erpc_path_rule", Expression: `open.flags & (O_CREAT|O_NOCTTY|O_NOFOLLOW) != 0 && process.file.name == "testsuite"`, @@ -95,10 +93,7 @@ func TestDentryPathMap(t *testing.T) { SkipIfNotAvailable(t) // generate a basename up to the current limit of the agent - var basename string - for i := 0; i < model.MaxSegmentLength; i++ { - basename += "a" - } + basename := strings.Repeat("a", model.MaxSegmentLength) rule := &rules.RuleDefinition{ ID: "test_map_path_rule", Expression: `open.flags & (O_CREAT|O_NOCTTY|O_NOFOLLOW) != 0 && process.file.name == "testsuite"`, @@ -162,10 +157,7 @@ func TestDentryName(t *testing.T) { SkipIfNotAvailable(t) // generate a basename up to the current limit of the agent - var basename string - for i := 0; i < model.MaxSegmentLength; i++ { - basename += "a" - } + basename := strings.Repeat("a", model.MaxSegmentLength) rule := &rules.RuleDefinition{ ID: "test_dentry_name_rule", Expression: `open.flags & (O_CREAT|O_NOCTTY|O_NOFOLLOW) != 0 && process.file.name == "testsuite"`, diff --git a/pkg/security/tests/event_test.go b/pkg/security/tests/event_test.go index 7c574e893f25d6..897f261d8d4167 100644 --- a/pkg/security/tests/event_test.go +++ b/pkg/security/tests/event_test.go @@ -377,10 +377,7 @@ func TestEventProductTags(t *testing.T) { } func truncatedParents(t *testing.T, staticOpts testOpts, dynamicOpts dynamicTestOpts) { - var truncatedParents string - for i := 0; i < 
model.MaxPathDepth; i++ { - truncatedParents += "a/" - } + truncatedParents := strings.Repeat("a/", model.MaxPathDepth) rule := &rules.RuleDefinition{ ID: "path_test", diff --git a/pkg/security/tests/failed_dns_test.go b/pkg/security/tests/failed_dns_test.go index 6e6d59c9025e86..3e863809c75a31 100644 --- a/pkg/security/tests/failed_dns_test.go +++ b/pkg/security/tests/failed_dns_test.go @@ -11,15 +11,17 @@ package tests // Package tests holds tests related files import ( "encoding/base64" + "encoding/hex" "encoding/json" - "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/security/events" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" - "github.com/stretchr/testify/assert" - "testing" - "time" ) // Purpose of the test: @@ -52,7 +54,7 @@ func getPayloadBytes(customEvent *events.CustomEvent) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("%x", decoded), nil + return hex.EncodeToString(decoded), nil } func TestFailedDNSFullResponse(t *testing.T) { diff --git a/pkg/security/tests/filters_test.go b/pkg/security/tests/filters_test.go index 12287a2ae73f96..078d90afe6a0fc 100644 --- a/pkg/security/tests/filters_test.go +++ b/pkg/security/tests/filters_test.go @@ -16,6 +16,7 @@ import ( "os/exec" "path" "path/filepath" + "strings" "syscall" "testing" "time" @@ -57,10 +58,7 @@ func TestFilterOpenBasenameApprover(t *testing.T) { SkipIfNotAvailable(t) // generate a basename up to the current limit of the agent - var basename string - for i := 0; i < model.MaxSegmentLength; i++ { - basename += "a" - } + basename := strings.Repeat("a", model.MaxSegmentLength) rule := &rules.RuleDefinition{ ID: "test_rule", Expression: fmt.Sprintf(`open.file.path == "{{.Root}}/%s"`, basename), diff --git a/pkg/security/tests/flow_pid_security_sk_classify_flow_test.go 
b/pkg/security/tests/flow_pid_security_sk_classify_flow_test.go index ae40f0104fc35b..19c93d007ee265 100644 --- a/pkg/security/tests/flow_pid_security_sk_classify_flow_test.go +++ b/pkg/security/tests/flow_pid_security_sk_classify_flow_test.go @@ -83,7 +83,7 @@ func udpCreateSocketAndSend(sockDomain int, serverSockAddr syscall.Sockaddr, bin boundPort <- addr.Port default: close(boundPort) - clientErr <- fmt.Errorf("getsockname error: unknown Sockaddr type") + clientErr <- errors.New("getsockname error: unknown Sockaddr type") return } <-next @@ -165,7 +165,7 @@ func tcpCreateSocketAndSend(sockDomain int, serverSockAddr syscall.Sockaddr, bin boundPort <- addr.Port default: close(boundPort) - clientErr <- fmt.Errorf("getsockname error: unknown Sockaddr type") + clientErr <- errors.New("getsockname error: unknown Sockaddr type") return } @@ -296,7 +296,7 @@ func startServer(sockDomain int, sockType int, sockAddr syscall.Sockaddr, server if !serverAcceptedChanClosed { close(serverAccepted) } - serverErr <- fmt.Errorf("accept timed out after 10s") + serverErr <- errors.New("accept timed out after 10s") return } diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index d1dcbd1402c896..512da51e3e15ad 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -651,7 +651,7 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] var proFile *os.File if withProfile { var err error - proFile, err = os.CreateTemp("/tmp", fmt.Sprintf("cpu-profile-%s", t.Name())) + proFile, err = os.CreateTemp("/tmp", "cpu-profile-"+t.Name()) if err != nil { t.Fatal(err) } @@ -1579,7 +1579,7 @@ func (tm *testModule) triggerLoadControllerReducer(_ *dockerCmdWrapper, id *acti func (tm *testModule) dockerCreateFiles(dockerInstance *dockerCmdWrapper, syscallTester string, directory string, numberOfFiles int) error { var files []string for i := 0; i < numberOfFiles; i++ { - files 
= append(files, filepath.Join(directory, "ad-test-create-"+fmt.Sprintf("%d", i))) + files = append(files, filepath.Join(directory, "ad-test-create-"+strconv.Itoa(i))) } args := []string{"sleep", "2", ";", "open"} args = append(args, files...) diff --git a/pkg/security/tests/move_mount_test.go b/pkg/security/tests/move_mount_test.go index b5d70c47db0ff0..16c3659b446842 100644 --- a/pkg/security/tests/move_mount_test.go +++ b/pkg/security/tests/move_mount_test.go @@ -9,6 +9,7 @@ package tests import ( + "errors" "fmt" "os" "strings" @@ -34,7 +35,7 @@ func GetMountID(fd int) (uint64, error) { } if stx.Mask&unix.STATX_MNT_ID == 0 { - return 0, fmt.Errorf("statx: kernel didn't fill STATX_MNT_ID") + return 0, errors.New("statx: kernel didn't fill STATX_MNT_ID") } return stx.Mnt_id, nil diff --git a/pkg/security/tests/network_device_test.go b/pkg/security/tests/network_device_test.go index 1a192bbefd85dc..2fb64bb739daad 100644 --- a/pkg/security/tests/network_device_test.go +++ b/pkg/security/tests/network_device_test.go @@ -9,6 +9,7 @@ package tests import ( + "errors" "fmt" "os" "os/exec" @@ -83,7 +84,7 @@ func TestNetDevice(t *testing.T) { } stat, ok := fi.Sys().(*syscall.Stat_t) if !ok { - return fmt.Errorf("couldn't parse test_netns inum") + return errors.New("couldn't parse test_netns inum") } testNetns = uint32(stat.Ino) diff --git a/pkg/security/tests/open_test.go b/pkg/security/tests/open_test.go index adaec17e5653a0..ad0d839fe24ac1 100644 --- a/pkg/security/tests/open_test.go +++ b/pkg/security/tests/open_test.go @@ -211,7 +211,7 @@ func TestOpen(t *testing.T) { test.WaitSignal(t, func() error { if f == nil { - return fmt.Errorf("failed to open test file") + return errors.New("failed to open test file") } // ftruncate _, _, errno := syscall.Syscall(syscall.SYS_FTRUNCATE, f.Fd(), uintptr(4), 0) diff --git a/pkg/security/tests/process_test.go b/pkg/security/tests/process_test.go index f4e3631701b80e..e177a289f523e7 100644 --- a/pkg/security/tests/process_test.go 
+++ b/pkg/security/tests/process_test.go @@ -425,10 +425,7 @@ func TestProcessContext(t *testing.T) { envs := []string{"LD_LIBRARY_PATH=/tmp/lib"} // size overflow - var long string - for i := 0; i != 1024; i++ { - long += "a" - } + long := strings.Repeat("a", 1024) args = append(args, long) test.WaitSignal(t, func() error { @@ -571,11 +568,7 @@ func TestProcessContext(t *testing.T) { envs := []string{"LD_LIBRARY_PATH=/tmp/lib"} // size overflow - var long string - for i := 0; i != 1024; i++ { - long += "a" - } - long += "=" + long := strings.Repeat("a", 1024) + "=" envs = append(envs, long) if kind == dockerWrapperType { diff --git a/pkg/security/tests/rename_test.go b/pkg/security/tests/rename_test.go index f0839770f01fdd..d5d5d4b007840f 100644 --- a/pkg/security/tests/rename_test.go +++ b/pkg/security/tests/rename_test.go @@ -381,7 +381,7 @@ func TestRenameFolder(t *testing.T) { t.Fatal(err) } - filename := fmt.Sprintf("%s/test-rename", testOldFolder) + filename := testOldFolder + "/test-rename" defer os.Remove(filename) for i := 0; i != 5; i++ { @@ -405,7 +405,7 @@ func TestRenameFolder(t *testing.T) { testOldFolder = testNewFolder testNewFolder = old - filename = fmt.Sprintf("%s/test-rename", testOldFolder) + filename = testOldFolder + "/test-rename" }) } } diff --git a/pkg/security/tests/sbom_test.go b/pkg/security/tests/sbom_test.go index a2b23873436b55..2edd926751784c 100644 --- a/pkg/security/tests/sbom_test.go +++ b/pkg/security/tests/sbom_test.go @@ -9,6 +9,7 @@ package tests import ( + "errors" "fmt" "os/exec" "testing" @@ -96,7 +97,7 @@ func TestSBOM(t *testing.T) { test.WaitSignal(t, func() error { sbom := p.Resolvers.SBOMResolver.GetWorkload("") if sbom == nil { - return fmt.Errorf("failed to find host SBOM for host") + return errors.New("failed to find host SBOM for host") } cmd := exec.Command("/bin/touch", "/usr/lib/os-release") return cmd.Run() @@ -126,7 +127,7 @@ func checkVersionAgainstApt(tb testing.TB, event *model.Event, pkgName string) { 
out, err := exec.Command("apt-cache", "policy", pkgName).CombinedOutput() require.NoError(tb, err, "failed to get package version: %s", string(out)) - assert.Contains(tb, string(out), fmt.Sprintf("Installed: %s", v), "package version doesn't match") + assert.Contains(tb, string(out), "Installed: "+v, "package version doesn't match") } func buildDebianVersion(version, release string, epoch int) string { diff --git a/pkg/security/tests/span_test.go b/pkg/security/tests/span_test.go index aed009f11025af..260fd7148aab5f 100644 --- a/pkg/security/tests/span_test.go +++ b/pkg/security/tests/span_test.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "os/exec" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -73,7 +74,7 @@ func TestSpan(t *testing.T) { test.validateSpanSchema(t, event) - assert.Equal(t, "204", fmt.Sprint(event.SpanContext.SpanID)) + assert.Equal(t, "204", strconv.FormatUint(event.SpanContext.SpanID, 10)) assert.Equal(t, fakeTraceID128b, event.SpanContext.TraceID.String()) }) }) @@ -105,7 +106,7 @@ func TestSpan(t *testing.T) { test.validateSpanSchema(t, event) - assert.Equal(t, "204", fmt.Sprint(event.SpanContext.SpanID)) + assert.Equal(t, "204", strconv.FormatUint(event.SpanContext.SpanID, 10)) assert.Equal(t, fakeTraceID128b, event.SpanContext.TraceID.String()) }) }) diff --git a/pkg/security/tests/syscall_tester.go b/pkg/security/tests/syscall_tester.go index b34fe56a1c3a6e..00e3004750a4c4 100644 --- a/pkg/security/tests/syscall_tester.go +++ b/pkg/security/tests/syscall_tester.go @@ -21,7 +21,7 @@ import ( var syscallTesterFS embed.FS func loadSyscallTester(t *testing.T, test *testModule, binary string) (string, error) { - testerBin, err := syscallTesterFS.ReadFile(fmt.Sprintf("syscall_tester/bin/%s", binary)) + testerBin, err := syscallTesterFS.ReadFile("syscall_tester/bin/" + binary) if err != nil { return "", err } diff --git a/pkg/security/tests/user_session_test.go b/pkg/security/tests/user_session_test.go index 03237882798efb..624331b10a3207 
100644 --- a/pkg/security/tests/user_session_test.go +++ b/pkg/security/tests/user_session_test.go @@ -11,6 +11,7 @@ import ( "fmt" "os" "os/exec" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -92,7 +93,7 @@ func TestK8SUserSession(t *testing.T) { }, event.ProcessContext.UserSession.K8SExtra) // Check that user session data is well set assert.Equal(t, event.ProcessContext.UserSession.Identity, event.ProcessContext.UserSession.K8SUsername) - assert.Equal(t, event.ProcessContext.UserSession.ID, fmt.Sprintf("%x", event.ProcessContext.UserSession.K8SSessionID)) + assert.Equal(t, event.ProcessContext.UserSession.ID, strconv.FormatUint(uint64(event.ProcessContext.UserSession.K8SSessionID), 16)) }) }) } diff --git a/pkg/security/utils/notifier.go b/pkg/security/utils/notifier.go index 8a28e686bdcf55..da362255c48450 100644 --- a/pkg/security/utils/notifier.go +++ b/pkg/security/utils/notifier.go @@ -7,7 +7,7 @@ package utils import ( - "fmt" + "errors" "sync" ) @@ -30,7 +30,7 @@ func (n *Notifier[E, O]) RegisterListener(event E, listener Listener[O]) error { if n.listeners != nil { n.listeners[event] = append(n.listeners[event], listener) } else { - return fmt.Errorf("a listener was inserted before initialization") + return errors.New("a listener was inserted before initialization") } return nil } diff --git a/pkg/security/utils/proc_linux.go b/pkg/security/utils/proc_linux.go index 38cc8aefa92a8e..71b41c8eb5294b 100644 --- a/pkg/security/utils/proc_linux.go +++ b/pkg/security/utils/proc_linux.go @@ -539,7 +539,7 @@ func GetTracerPid(pid uint32) (uint32, error) { return uint32(tracerPid), nil } } - return 0, fmt.Errorf("TracerPid field not found") + return 0, errors.New("TracerPid field not found") } // FindTraceesByTracerPid returns the process list being trced by the given tracer host PID diff --git a/pkg/serializer/internal/metrics/events_test.go b/pkg/serializer/internal/metrics/events_test.go index 455e464de6c2c7..160f96ca635721 100644 --- 
a/pkg/serializer/internal/metrics/events_test.go +++ b/pkg/serializer/internal/metrics/events_test.go @@ -118,7 +118,7 @@ func createBenchmarkEvents(numberOfItem int) []*event.Event { func runBenchmark(b *testing.B, bench func(*testing.B, int)) { for i := 1; i <= 1000*1000; i *= 10 { numberOfItem := i // To avoid linter waring - b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { + b.Run(strconv.Itoa(i), func(b *testing.B) { bench(b, numberOfItem) }) } diff --git a/pkg/serializer/internal/metrics/pipeline.go b/pkg/serializer/internal/metrics/pipeline.go index 879c44d5b2835e..6a6895088313dd 100644 --- a/pkg/serializer/internal/metrics/pipeline.go +++ b/pkg/serializer/internal/metrics/pipeline.go @@ -6,10 +6,10 @@ package metrics import ( - "fmt" "maps" "net/http" "slices" + "strconv" "github.com/google/uuid" @@ -102,8 +102,8 @@ func (dest *PipelineDestination) send(payloads transaction.BytesPayloads, forwar } if dest.AddValidationHeaders { txn.Headers.Set("X-Metrics-Request-ID", batchID) - txn.Headers.Set("X-Metrics-Request-Seq", fmt.Sprintf("%v", seq)) - txn.Headers.Set("X-Metrics-Request-Len", fmt.Sprintf("%v", len(payloads))) + txn.Headers.Set("X-Metrics-Request-Seq", strconv.Itoa(seq)) + txn.Headers.Set("X-Metrics-Request-Len", strconv.Itoa(len(payloads))) } auth.Authorize(txn) diff --git a/pkg/serializer/internal/metrics/service_checks_test.go b/pkg/serializer/internal/metrics/service_checks_test.go index 584cfb8d8462e1..f4a89e6137510f 100644 --- a/pkg/serializer/internal/metrics/service_checks_test.go +++ b/pkg/serializer/internal/metrics/service_checks_test.go @@ -8,7 +8,7 @@ package metrics import ( - "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -118,7 +118,7 @@ func createServiceChecks(numberOfItem int) ServiceChecks { var serviceCheckCollections []*servicecheck.ServiceCheck for i := 0; i < numberOfItem; i++ { - serviceCheckCollections = append(serviceCheckCollections, createServiceCheck(fmt.Sprint(i))) + serviceCheckCollections = 
append(serviceCheckCollections, createServiceCheck(strconv.Itoa(i))) } return ServiceChecks(serviceCheckCollections) } diff --git a/pkg/serializer/marshaler/dummy_marshaller.go b/pkg/serializer/marshaler/dummy_marshaller.go index 07e82da14d53f6..bb237773bee62f 100644 --- a/pkg/serializer/marshaler/dummy_marshaller.go +++ b/pkg/serializer/marshaler/dummy_marshaller.go @@ -8,7 +8,6 @@ package marshaler import ( "errors" - "fmt" jsoniter "github.com/json-iterator/go" ) @@ -56,10 +55,10 @@ func (d *DummyMarshaller) WriteFooter(stream *jsoniter.Stream) error { // MarshalJSON not implemented func (d *DummyMarshaller) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("not implemented") + return nil, errors.New("not implemented") } // Marshal not implemented func (d *DummyMarshaller) Marshal() ([]byte, error) { - return nil, fmt.Errorf("not implemented") + return nil, errors.New("not implemented") } diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go index 15cd1087777399..0549bd7299f212 100644 --- a/pkg/serializer/serializer_test.go +++ b/pkg/serializer/serializer_test.go @@ -8,6 +8,7 @@ package serializer import ( + "errors" "fmt" "net/http" "reflect" @@ -169,10 +170,10 @@ func (p *testPayload) DescribeItem(i int) string { return "description" } type testErrorPayload struct{} //nolint:revive // TODO(AML) Fix revive linter -func (p *testErrorPayload) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("some error") } +func (p *testErrorPayload) MarshalJSON() ([]byte, error) { return nil, errors.New("some error") } //nolint:revive // TODO(AML) Fix revive linter -func (p *testErrorPayload) Marshal() ([]byte, error) { return nil, fmt.Errorf("some error") } +func (p *testErrorPayload) Marshal() ([]byte, error) { return nil, errors.New("some error") } func (p *testErrorPayload) WriteHeader(stream *jsoniter.Stream) error { _, err := stream.Write(jsonHeader) @@ -186,7 +187,7 @@ func (p *testErrorPayload) WriteFooter(stream 
*jsoniter.Stream) error { //nolint:revive // TODO(AML) Fix revive linter func (p *testErrorPayload) WriteItem(stream *jsoniter.Stream, i int) error { - return fmt.Errorf("some error") + return errors.New("some error") } func (p *testErrorPayload) Len() int { return 1 } @@ -487,7 +488,7 @@ func TestSendMetadata(t *testing.T) { require.Nil(t, err) f.AssertExpectations(t) - f.On("SubmitMetadata", jsonPayloads, s.jsonExtraHeadersWithCompression).Return(fmt.Errorf("some error")).Times(1) + f.On("SubmitMetadata", jsonPayloads, s.jsonExtraHeadersWithCompression).Return(errors.New("some error")).Times(1) err = s.SendMetadata(payload) require.NotNil(t, err) f.AssertExpectations(t) @@ -523,7 +524,7 @@ func TestSendProcessesMetadata(t *testing.T) { require.Nil(t, err) f.AssertExpectations(t) - f.On("SubmitV1Intake", payloads, s.jsonExtraHeadersWithCompression).Return(fmt.Errorf("some error")).Times(1) + f.On("SubmitV1Intake", payloads, s.jsonExtraHeadersWithCompression).Return(errors.New("some error")).Times(1) err = s.SendProcessesMetadata("test") require.NotNil(t, err) f.AssertExpectations(t) diff --git a/pkg/serverless/metrics/metric_test.go b/pkg/serverless/metrics/metric_test.go index 20345705cc49c1..f6e6225caaf474 100644 --- a/pkg/serverless/metrics/metric_test.go +++ b/pkg/serverless/metrics/metric_test.go @@ -6,6 +6,7 @@ package metrics import ( + "errors" "fmt" "net" "os" @@ -53,7 +54,7 @@ func TestStartDoesNotBlock(t *testing.T) { type InvalidMetricConfigMocked struct{} func (m *InvalidMetricConfigMocked) GetMultipleEndpoints() (utils.EndpointDescriptorSet, error) { - return nil, fmt.Errorf("error") + return nil, errors.New("error") } func TestStartInvalidConfig(t *testing.T) { @@ -71,7 +72,7 @@ type MetricDogStatsDMocked struct{} //nolint:revive // TODO(SERV) Fix revive linter func (m *MetricDogStatsDMocked) NewServer(_ aggregator.Demultiplexer) (dogstatsdServer.ServerlessDogstatsd, error) { - return nil, fmt.Errorf("error") + return nil, errors.New("error") } func 
TestStartInvalidDogStatsD(t *testing.T) { diff --git a/pkg/serverless/trace/trace.go b/pkg/serverless/trace/trace.go index 50226621af9564..7d475e11e4a154 100644 --- a/pkg/serverless/trace/trace.go +++ b/pkg/serverless/trace/trace.go @@ -7,7 +7,6 @@ package trace import ( "context" - "fmt" "net/http" "os" "strconv" @@ -233,7 +232,7 @@ func filterSpan(span *pb.Span) bool { // Filters out TCP spans to internal infrastructure if tcpHost, ok := span.Meta[tcpRemoteHostMetaKey]; ok { if tcpPort, ok := span.Meta[tcpRemotePortMetaKey]; ok { - tcpURLPrefix := fmt.Sprint("http://" + tcpHost + ":" + tcpPort) + tcpURLPrefix := "http://" + tcpHost + ":" + tcpPort if strings.HasPrefix(tcpURLPrefix, agentURLPrefix) { log.Debugf("Detected span with tcp url %s, removing it", tcpURLPrefix) return true diff --git a/pkg/serverless/trace/trace_test.go b/pkg/serverless/trace/trace_test.go index 38a44070b892d8..635b73ce61373a 100644 --- a/pkg/serverless/trace/trace_test.go +++ b/pkg/serverless/trace/trace_test.go @@ -8,7 +8,7 @@ package trace import ( - "fmt" + "errors" "os" "strconv" "testing" @@ -34,7 +34,7 @@ type LoadConfigMocked struct { } func (l *LoadConfigMocked) Load() (*config.AgentConfig, error) { - return nil, fmt.Errorf("error") + return nil, errors.New("error") } func TestStartEnabledTrueInvalidConfig(t *testing.T) { diff --git a/pkg/snmp/gosnmplib/gosnmp_auth_test.go b/pkg/snmp/gosnmplib/gosnmp_auth_test.go index 38ce2939c306c8..6b9fe14247cadb 100644 --- a/pkg/snmp/gosnmplib/gosnmp_auth_test.go +++ b/pkg/snmp/gosnmplib/gosnmp_auth_test.go @@ -6,7 +6,7 @@ package gosnmplib import ( - "fmt" + "errors" "testing" "github.com/gosnmp/gosnmp" @@ -22,7 +22,7 @@ func Test_getAuthProtocol(t *testing.T) { { "invalid", gosnmp.NoAuth, - fmt.Errorf("unsupported authentication protocol: invalid"), + errors.New("unsupported authentication protocol: invalid"), }, { "", @@ -83,7 +83,7 @@ func Test_getPrivProtocol(t *testing.T) { { "invalid", gosnmp.NoPriv, - fmt.Errorf("unsupported privacy 
protocol: invalid"), + errors.New("unsupported privacy protocol: invalid"), }, { "", diff --git a/pkg/snmp/snmp.go b/pkg/snmp/snmp.go index 2a7943eb0dc79f..d5f418208d9247 100644 --- a/pkg/snmp/snmp.go +++ b/pkg/snmp/snmp.go @@ -241,8 +241,8 @@ func NewListenerConfig() (ListenerConfig, error) { func (c *Config) LegacyDigest(address string) string { h := fnv.New64() // Hash write never returns an error - h.Write([]byte(address)) //nolint:errcheck - h.Write([]byte(fmt.Sprintf("%d", c.Port))) //nolint:errcheck + h.Write([]byte(address)) //nolint:errcheck + h.Write([]byte(strconv.FormatUint(uint64(c.Port), 10))) //nolint:errcheck h.Write([]byte(c.Version)) //nolint:errcheck h.Write([]byte(c.Community)) //nolint:errcheck @@ -274,8 +274,8 @@ func (c *Config) LegacyDigest(address string) string { func (c *Config) Digest(address string) string { h := fnv.New64() // Hash write never returns an error - h.Write([]byte(address)) //nolint:errcheck - h.Write([]byte(fmt.Sprintf("%d", c.Port))) //nolint:errcheck + h.Write([]byte(address)) //nolint:errcheck + h.Write([]byte(strconv.FormatUint(uint64(c.Port), 10))) //nolint:errcheck for _, authentication := range c.Authentications { h.Write([]byte(authentication.Version)) //nolint:errcheck diff --git a/pkg/snmp/snmpparse/gosnmp.go b/pkg/snmp/snmpparse/gosnmp.go index b511f576d55a93..e41fcfef6e7716 100644 --- a/pkg/snmp/snmpparse/gosnmp.go +++ b/pkg/snmp/snmpparse/gosnmp.go @@ -6,18 +6,20 @@ package snmpparse import ( + "errors" "fmt" - "github.com/DataDog/datadog-agent/comp/core/log/def" + "time" + + log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/snmptraps/snmplog" "github.com/gosnmp/gosnmp" - "time" ) // NewSNMP validates an SNMPConfig and builds a GoSNMP from it. 
func NewSNMP(conf *SNMPConfig, logger log.Component) (*gosnmp.GoSNMP, error) { // Communication options check if conf.Timeout == 0 { - return nil, fmt.Errorf("timeout cannot be 0") + return nil, errors.New("timeout cannot be 0") } var version gosnmp.SnmpVersion var ok bool @@ -39,7 +41,7 @@ func NewSNMP(conf *SNMPConfig, logger log.Component) (*gosnmp.GoSNMP, error) { // Authentication check if version == gosnmp.Version3 && conf.Username == "" { - return nil, fmt.Errorf("username is required for snmp v3") + return nil, errors.New("username is required for snmp v3") } port := conf.Port diff --git a/pkg/snmp/utils/utils.go b/pkg/snmp/utils/utils.go index 6933f56bd39d4a..15e4aad20ed656 100644 --- a/pkg/snmp/utils/utils.go +++ b/pkg/snmp/utils/utils.go @@ -8,7 +8,7 @@ package utils import ( "bytes" - "fmt" + "errors" ) // NormalizeNamespace applies policy according to hostname rule @@ -17,14 +17,14 @@ func NormalizeNamespace(namespace string) (string, error) { // namespace longer than 100 characters are illegal if len(namespace) > 100 { - return "", fmt.Errorf("namespace is too long, should contain less than 100 characters") + return "", errors.New("namespace is too long, should contain less than 100 characters") } for _, r := range namespace { switch r { // has null rune just toss the whole thing case '\x00': - return "", fmt.Errorf("namespace cannot contain null character") + return "", errors.New("namespace cannot contain null character") // drop these characters entirely case '\n', '\r', '\t': continue @@ -38,7 +38,7 @@ func NormalizeNamespace(namespace string) (string, error) { normalizedNamespace := buf.String() if normalizedNamespace == "" { - return "", fmt.Errorf("namespace cannot be empty") + return "", errors.New("namespace cannot be empty") } return normalizedNamespace, nil diff --git a/pkg/system-probe/api/server/listener_windows.go b/pkg/system-probe/api/server/listener_windows.go index 3bc2bdd862205d..c7dbcb19682725 100644 --- 
a/pkg/system-probe/api/server/listener_windows.go +++ b/pkg/system-probe/api/server/listener_windows.go @@ -6,6 +6,7 @@ package server import ( + "errors" "fmt" "net" "strings" @@ -49,11 +50,11 @@ func setupSecurityDescriptor() (string, error) { // Sanity checks if len(sidString) == 0 { - return "", fmt.Errorf("failed to get SID string from ddagentuser") + return "", errors.New("failed to get SID string from ddagentuser") } if sidString == everyoneSid { - return "", fmt.Errorf("ddagentuser as Everyone is not supported") + return "", errors.New("ddagentuser as Everyone is not supported") } sd, err := formatSecurityDescriptorWithSid(sidString) diff --git a/pkg/system-probe/config/config_linux_bpf_test.go b/pkg/system-probe/config/config_linux_bpf_test.go index 0ca77d7edb1f95..e7b0b855029836 100644 --- a/pkg/system-probe/config/config_linux_bpf_test.go +++ b/pkg/system-probe/config/config_linux_bpf_test.go @@ -8,7 +8,6 @@ package config import ( - "fmt" "os" "strconv" "testing" @@ -34,7 +33,7 @@ func TestNetworkProcessEventMonitoring(t *testing.T) { {network: true, netProcEvents: false, enabled: false}, {network: true, netProcEvents: true, enabled: true}, } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { os.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(te.network)) os.Setenv("DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED", strconv.FormatBool(te.netProcEvents)) defer os.Unsetenv("DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED") diff --git a/pkg/system-probe/config/config_test.go b/pkg/system-probe/config/config_test.go index ad2b301a1e06c9..abecc7f0b70717 100644 --- a/pkg/system-probe/config/config_test.go +++ b/pkg/system-probe/config/config_test.go @@ -8,7 +8,6 @@ package config import ( - "fmt" "runtime" "strconv" "testing" @@ -38,7 +37,7 @@ func TestEventMonitor(t *testing.T) { {cws: false, fim: false, networkEvents: false, gpu: true, enabled: true}, {usmEvents: true, enabled: 
true}, } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { t.Logf("%+v\n", tc) t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(tc.cws)) t.Setenv("DD_RUNTIME_SECURITY_CONFIG_FIM_ENABLED", strconv.FormatBool(tc.fim)) diff --git a/pkg/system-probe/config/config_windows.go b/pkg/system-probe/config/config_windows.go index dd9b7ce45d557a..01b143944fc6e1 100644 --- a/pkg/system-probe/config/config_windows.go +++ b/pkg/system-probe/config/config_windows.go @@ -8,7 +8,7 @@ package config import ( - "fmt" + "errors" "strings" "github.com/DataDog/datadog-agent/pkg/config/model" @@ -34,7 +34,7 @@ func init() { // ValidateSocketAddress validates that the sysprobe socket config option is of the correct format. func ValidateSocketAddress(sockAddress string) error { if !strings.HasPrefix(sockAddress, `\\.\pipe\`) { - return fmt.Errorf(`named pipe must be of the form '\\.\pipe\'`) + return errors.New(`named pipe must be of the form '\\.\pipe\'`) } return nil } diff --git a/pkg/trace/agent/agent_test.go b/pkg/trace/agent/agent_test.go index 1af7a8dc89d5cd..4c772d46b9ecf8 100644 --- a/pkg/trace/agent/agent_test.go +++ b/pkg/trace/agent/agent_test.go @@ -9,7 +9,6 @@ import ( "bytes" "context" "errors" - "fmt" "io" "math" "net/http" @@ -3886,7 +3885,7 @@ func TestSetFirstTraceTags(t *testing.T) { traceAgent.setFirstTraceTags(root) assert.Equal(t, cfg.InstallSignature.InstallID, root.Meta[tagInstallID]) assert.Equal(t, cfg.InstallSignature.InstallType, root.Meta[tagInstallType]) - assert.Equal(t, fmt.Sprintf("%v", cfg.InstallSignature.InstallTime), root.Meta[tagInstallTime]) + assert.Equal(t, strconv.FormatInt(cfg.InstallSignature.InstallTime, 10), root.Meta[tagInstallTime]) // Also make sure the tags are only set once per agent instance, // calling setFirstTraceTags on another span by the same agent should have no effect @@ -3918,7 +3917,7 @@ func TestSetFirstTraceTags(t *testing.T) { 
traceAgent.setFirstTraceTags(differentServiceRoot) assert.Equal(t, cfg.InstallSignature.InstallID, differentServiceRoot.Meta[tagInstallID]) assert.Equal(t, cfg.InstallSignature.InstallType, differentServiceRoot.Meta[tagInstallType]) - assert.Equal(t, fmt.Sprintf("%v", cfg.InstallSignature.InstallTime), differentServiceRoot.Meta[tagInstallTime]) + assert.Equal(t, strconv.FormatInt(cfg.InstallSignature.InstallTime, 10), differentServiceRoot.Meta[tagInstallTime]) }) traceAgent = NewTestAgent(ctx, cfg, telemetry.NewNoopCollector()) diff --git a/pkg/trace/agent/truncator_test.go b/pkg/trace/agent/truncator_test.go index f8908caf1d8cfe..7def1c6b2d9f6b 100644 --- a/pkg/trace/agent/truncator_test.go +++ b/pkg/trace/agent/truncator_test.go @@ -119,7 +119,7 @@ func TestTruncateStructuredMetaTag(t *testing.T) { a := &Agent{conf: config.New()} s := testSpan() structuredTagName := fmt.Sprintf("_dd.%s.%s", key, suffix) - notStructuredTagName := fmt.Sprintf("key.%s", suffix) + notStructuredTagName := "key." 
+ suffix s.Meta[structuredTagName] = val s.Meta[notStructuredTagName] = val a.Truncate(s) diff --git a/pkg/trace/api/api_nix_test.go b/pkg/trace/api/api_nix_test.go index 95b22b7d6e7379..87cd09bb217513 100644 --- a/pkg/trace/api/api_nix_test.go +++ b/pkg/trace/api/api_nix_test.go @@ -125,7 +125,7 @@ func TestHTTPReceiverStart(t *testing.T) { socket := filepath.Join(t.TempDir(), "agent.sock") return true, 0, socket, []string{ "HTTP receiver disabled by config (apm_config.receiver_port: 0)", - fmt.Sprintf("Listening for traces at unix://%s", socket), + "Listening for traces at unix://" + socket, } }, "both": func() (bool, int, string, []string) { @@ -133,7 +133,7 @@ func TestHTTPReceiverStart(t *testing.T) { socket := filepath.Join(t.TempDir(), "agent.sock") return true, port, socket, []string{ fmt.Sprintf("Listening for traces at http://localhost:%d", port), - fmt.Sprintf("Listening for traces at unix://%s", socket), + "Listening for traces at unix://" + socket, } }, } { diff --git a/pkg/trace/api/debugger_test.go b/pkg/trace/api/debugger_test.go index 1a6b8d14812737..a93f66cbe59626 100644 --- a/pkg/trace/api/debugger_test.go +++ b/pkg/trace/api/debugger_test.go @@ -6,7 +6,6 @@ package api import ( - "fmt" "io" "net/http" "net/http/httptest" @@ -117,7 +116,7 @@ func TestDebuggerProxyHandler(t *testing.T) { called = true })) defer srv.Close() - req, err := http.NewRequest("POST", fmt.Sprintf("/some/path?ddtags=%s", tooLongString), nil) + req, err := http.NewRequest("POST", "/some/path?ddtags="+tooLongString, nil) assert.NoError(t, err) conf := getConf() conf.Hostname = "myhost" @@ -133,11 +132,11 @@ func TestDebuggerProxyHandler(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { ddtags := req.URL.Query().Get("ddtags") assert.False(t, strings.Contains(ddtags, "orchestrator"), "ddtags should not contain orchestrator: %v", ddtags) - assert.Equal(t, fmt.Sprintf("host:myhost,default_env:test,agent_version:v1,%s", 
extraTag), ddtags) + assert.Equal(t, "host:myhost,default_env:test,agent_version:v1,"+extraTag, ddtags) numCalls.Add(1) })) defer srv.Close() - req, err := http.NewRequest("POST", fmt.Sprintf("/some/path?ddtags=%s", extraTag), nil) + req, err := http.NewRequest("POST", "/some/path?ddtags="+extraTag, nil) assert.NoError(t, err) conf := getConf() conf.Hostname = "myhost" diff --git a/pkg/trace/api/evp_proxy.go b/pkg/trace/api/evp_proxy.go index f94e7fab5e8f43..01cb25306e74bc 100644 --- a/pkg/trace/api/evp_proxy.go +++ b/pkg/trace/api/evp_proxy.go @@ -8,6 +8,7 @@ package api import ( "bytes" "context" + "errors" "fmt" "io" stdlog "log" @@ -129,7 +130,7 @@ func (t *evpProxyTransport) RoundTrip(req *http.Request) (rresp *http.Response, // Sanitize the input, don't accept any valid URL but just some limited subset if len(subdomain) == 0 { - return nil, fmt.Errorf("EVPProxy: no subdomain specified") + return nil, errors.New("EVPProxy: no subdomain specified") } if !isValidSubdomain(subdomain) { return nil, fmt.Errorf("EVPProxy: invalid subdomain: %s", subdomain) @@ -143,7 +144,7 @@ func (t *evpProxyTransport) RoundTrip(req *http.Request) (rresp *http.Response, } if needsAppKey && t.conf.EVPProxy.ApplicationKey == "" { - return nil, fmt.Errorf("EVPProxy: ApplicationKey needed but not set") + return nil, errors.New("EVPProxy: ApplicationKey needed but not set") } // We don't want to forward arbitrary headers, create a copy of the input headers and clear them @@ -152,7 +153,7 @@ func (t *evpProxyTransport) RoundTrip(req *http.Request) (rresp *http.Response, // Set standard headers req.Header.Set("User-Agent", "") // Set to empty string so Go doesn't set its default - req.Header.Set("Via", fmt.Sprintf("trace-agent %s", t.conf.AgentVersion)) + req.Header.Set("Via", "trace-agent "+t.conf.AgentVersion) // Copy allowed headers from the input request for _, header := range EvpProxyAllowedHeaders { diff --git a/pkg/trace/api/evp_proxy_test.go b/pkg/trace/api/evp_proxy_test.go index 
002842c5703cc3..0887220ee54ec3 100644 --- a/pkg/trace/api/evp_proxy_test.go +++ b/pkg/trace/api/evp_proxy_test.go @@ -8,13 +8,13 @@ package api import ( "bytes" "crypto/rand" - "fmt" "io" "log" "net/http" "net/http/httptest" "net/http/httputil" "net/url" + "strconv" "strings" "testing" "time" @@ -104,7 +104,7 @@ func TestEVPProxyForwarder(t *testing.T) { req.Header.Set("Content-Type", "text/json") proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) resp.Body.Close() require.Len(t, proxyreqs, 1) proxyreq := proxyreqs[0] @@ -156,7 +156,7 @@ func TestEVPProxyForwarder(t *testing.T) { proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Len(t, proxyreqs, 1) assert.Equal(t, "container:myid", proxyreqs[0].Header.Get("X-Datadog-Container-Tags")) assert.Equal(t, "myid", proxyreqs[0].Header.Get(header.ContainerID)) @@ -177,7 +177,7 @@ func TestEVPProxyForwarder(t *testing.T) { proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Len(t, proxyreqs, 1) assert.Equal(t, "container:myid,key:_val", proxyreqs[0].Header.Get("X-Datadog-Container-Tags")) assert.Equal(t, "myid", proxyreqs[0].Header.Get(header.ContainerID)) @@ -196,7 +196,7 @@ func TestEVPProxyForwarder(t *testing.T) { proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) resp.Body.Close() 
- require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Len(t, proxyreqs, 3) assert.Equal(t, "my.subdomain.us3.datadoghq.com", proxyreqs[0].Host) @@ -230,7 +230,7 @@ func TestEVPProxyForwarder(t *testing.T) { resp.Body.Close() require.Len(t, proxyreqs, 0) - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Contains(t, logs, "no subdomain") // check metrics @@ -254,7 +254,7 @@ func TestEVPProxyForwarder(t *testing.T) { resp.Body.Close() require.Len(t, proxyreqs, 0) - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Contains(t, logs, "invalid subdomain") // check metrics @@ -278,7 +278,7 @@ func TestEVPProxyForwarder(t *testing.T) { resp.Body.Close() require.Len(t, proxyreqs, 0) - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Contains(t, logs, "invalid target path") // check metrics @@ -305,7 +305,7 @@ func TestEVPProxyForwarder(t *testing.T) { resp.Body.Close() require.Len(t, proxyreqs, 0) - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Contains(t, logs, "invalid query string") // check metrics @@ -391,7 +391,7 @@ func TestEVPProxyForwarder(t *testing.T) { resp.Body.Close() require.Len(t, proxyreqs, 0) - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, 
http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Contains(t, logs, "read limit reached") // check metrics @@ -431,7 +431,7 @@ func TestEVPProxyForwarder(t *testing.T) { proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Len(t, proxyreqs, 1) assert.Equal(t, "test_application_key", proxyreqs[0].Header.Get("DD-APPLICATION-KEY")) assert.Equal(t, "", logs) @@ -451,7 +451,7 @@ func TestEVPProxyForwarder(t *testing.T) { resp.Body.Close() require.Len(t, proxyreqs, 0) - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) require.Contains(t, logs, "ApplicationKey needed but not set") // check metrics @@ -481,7 +481,7 @@ func TestEVPProxyForwarder(t *testing.T) { req.Header.Set("DD-CI-PROVIDER-NAME", "Allowed-Header") proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) resp.Body.Close() require.Len(t, proxyreqs, 1) proxyreq := proxyreqs[0] @@ -508,7 +508,7 @@ func TestEVPProxyForwarder(t *testing.T) { req.Header.Set("X-Datadog-EVP-Subdomain", "my.subdomain") proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) resp.Body.Close() require.Len(t, proxyreqs, 1) proxyreq := proxyreqs[0] @@ -529,7 +529,7 @@ func TestEVPProxyForwarder(t 
*testing.T) { req.Header.Set("X-Datadog-Error-Tracking-Standalone", "orig") proxyreqs, resp, logs := sendRequestThroughForwarderWithMockRoundTripper(conf, req, stats) - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) resp.Body.Close() require.Len(t, proxyreqs, 1) proxyreq := proxyreqs[0] @@ -581,7 +581,7 @@ func TestE2E(t *testing.T) { resp, logs := sendRequestThroughForwarderAgainstDummyServer(conf, req, stats, strings.TrimPrefix(server.URL, "http://")) resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) assert.Equal(t, "", logs) body, err := io.ReadAll(resp.Body) assert.NoError(t, err) @@ -604,7 +604,7 @@ func TestE2E(t *testing.T) { resp, logs := sendRequestThroughForwarderAgainstDummyServer(conf, req, stats, strings.TrimPrefix(server.URL, "http://")) resp.Body.Close() - require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusBadGateway, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) assert.Equal(t, "http: proxy error: context deadline exceeded\n", logs) }) @@ -627,7 +627,7 @@ func TestE2E(t *testing.T) { resp, logs := sendRequestThroughForwarderAgainstDummyServer(conf, req, stats, strings.TrimPrefix(server.URL, "http://")) resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", fmt.Sprint(resp.StatusCode)) + require.Equal(t, http.StatusOK, resp.StatusCode, "Got: ", strconv.Itoa(resp.StatusCode)) assert.Equal(t, "", logs) body, err := io.ReadAll(resp.Body) assert.NoError(t, err) diff --git a/pkg/trace/api/info.go b/pkg/trace/api/info.go index d1d2e0ed536488..90fe410228d901 100644 --- a/pkg/trace/api/info.go +++ b/pkg/trace/api/info.go @@ -7,6 +7,7 @@ package api import ( "crypto/sha256" + 
"encoding/hex" "encoding/json" "fmt" "net/http" @@ -144,7 +145,7 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) panic(fmt.Errorf("Error making /info handler: %v", err)) } h := sha256.Sum256(txt) - return fmt.Sprintf("%x", h), func(w http.ResponseWriter, req *http.Request) { + return hex.EncodeToString(h[:]), func(w http.ResponseWriter, req *http.Request) { containerID := r.containerIDProvider.GetContainerID(req.Context(), req.Header) if containerTags, err := r.conf.ContainerTags(containerID); err == nil { hash := computeContainerTagsHash(containerTags) diff --git a/pkg/trace/api/openlineage.go b/pkg/trace/api/openlineage.go index 5105fafd498806..6628624ede5b3f 100644 --- a/pkg/trace/api/openlineage.go +++ b/pkg/trace/api/openlineage.go @@ -87,7 +87,7 @@ func addOpenLineageAPIVersion(u *url.URL, version int) { func openLineageErrorHandler(message string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - msg := fmt.Sprintf("OpenLineage forwarder is OFF: %s", message) + msg := "OpenLineage forwarder is OFF: " + message http.Error(w, msg, http.StatusInternalServerError) }) } @@ -113,7 +113,7 @@ func newOpenLineageProxy(conf *config.AgentConfig, urls []*url.URL, keys []strin log.Debug("[openlineage] Creating reverse proxy") cidProvider := NewIDProvider(conf.ContainerProcRoot, conf.ContainerIDFromOriginInfo) director := func(req *http.Request) { - req.Header.Set("Via", fmt.Sprintf("trace-agent %s", conf.AgentVersion)) + req.Header.Set("Via", "trace-agent "+conf.AgentVersion) if _, ok := req.Header["User-Agent"]; !ok { // explicitly disable User-Agent so it's not set to the default value // that net/http gives it: Go-http-client/1.1 diff --git a/pkg/trace/api/otlp.go b/pkg/trace/api/otlp.go index 5a7116a45a589a..68b9c77e4b5fe5 100644 --- a/pkg/trace/api/otlp.go +++ b/pkg/trace/api/otlp.go @@ -298,7 +298,7 @@ func (o *OTLPReceiver) receiveResourceSpansV2(ctx context.Context, rspans ptrace tagstats 
:= &info.TagStats{ Tags: info.Tags{ Lang: lang, - TracerVersion: fmt.Sprintf("otlp-%s", traceutil.GetOTelAttrVal(resourceAttributes, true, string(semconv.TelemetrySDKVersionKey))), + TracerVersion: "otlp-" + traceutil.GetOTelAttrVal(resourceAttributes, true, string(semconv.TelemetrySDKVersionKey)), EndpointVersion: "opentelemetry_grpc_v1", }, Stats: info.NewStats(), @@ -419,7 +419,7 @@ func (o *OTLPReceiver) receiveResourceSpansV1(ctx context.Context, rspans ptrace LangVersion: fastHeaderGet(httpHeader, header.LangVersion), Interpreter: fastHeaderGet(httpHeader, header.LangInterpreter), LangVendor: fastHeaderGet(httpHeader, header.LangInterpreterVendor), - TracerVersion: fmt.Sprintf("otlp-%s", rattr[string(semconv.TelemetrySDKVersionKey)]), + TracerVersion: "otlp-" + rattr[string(semconv.TelemetrySDKVersionKey)], EndpointVersion: "opentelemetry_grpc_v1", }, Stats: info.NewStats(), diff --git a/pkg/trace/api/pipeline_stats.go b/pkg/trace/api/pipeline_stats.go index b8daf12590bf7d..c8b5f257760b90 100644 --- a/pkg/trace/api/pipeline_stats.go +++ b/pkg/trace/api/pipeline_stats.go @@ -54,7 +54,7 @@ func (r *HTTPReceiver) pipelineStatsProxyHandler() http.Handler { } tags := fmt.Sprintf("host:%s,default_env:%s,agent_version:%s", r.conf.Hostname, r.conf.DefaultEnv, r.conf.AgentVersion) if orch := r.conf.FargateOrchestrator; orch != config.OrchestratorUnknown { - tag := fmt.Sprintf("orchestrator:fargate_%s", strings.ToLower(string(orch))) + tag := "orchestrator:fargate_" + strings.ToLower(string(orch)) tags = tags + "," + tag } return newPipelineStatsProxy(r.conf, urls, apiKeys, tags, r.statsd) @@ -73,7 +73,7 @@ func newPipelineStatsProxy(conf *config.AgentConfig, urls []*url.URL, apiKeys [] log.Debug("[pipeline_stats] Creating reverse proxy") cidProvider := NewIDProvider(conf.ContainerProcRoot, conf.ContainerIDFromOriginInfo) director := func(req *http.Request) { - req.Header.Set("Via", fmt.Sprintf("trace-agent %s", conf.AgentVersion)) + req.Header.Set("Via", "trace-agent 
"+conf.AgentVersion) if _, ok := req.Header["User-Agent"]; !ok { // explicitly disable User-Agent so it's not set to the default value // that net/http gives it: Go-http-client/1.1 diff --git a/pkg/trace/api/profiles.go b/pkg/trace/api/profiles.go index 06c815cb0f3a25..e021728cf12e51 100644 --- a/pkg/trace/api/profiles.go +++ b/pkg/trace/api/profiles.go @@ -84,7 +84,7 @@ func (r *HTTPReceiver) profileProxyHandler() http.Handler { tags.WriteString(fmt.Sprintf("host:%s,default_env:%s,agent_version:%s", r.conf.Hostname, r.conf.DefaultEnv, r.conf.AgentVersion)) if orch := r.conf.FargateOrchestrator; orch != config.OrchestratorUnknown { - tags.WriteString(fmt.Sprintf(",orchestrator:fargate_%s", strings.ToLower(string(orch)))) + tags.WriteString(",orchestrator:fargate_" + strings.ToLower(string(orch))) } if r.conf.AzureServerlessTags != "" { tags.WriteString(r.conf.AzureServerlessTags) @@ -139,7 +139,7 @@ func isRetryableBodyReadError(err error) bool { func newProfileProxy(conf *config.AgentConfig, targets []*url.URL, keys []string, tags string, statsd statsd.ClientInterface) *httputil.ReverseProxy { cidProvider := NewIDProvider(conf.ContainerProcRoot, conf.ContainerIDFromOriginInfo) director := func(req *http.Request) { - req.Header.Set("Via", fmt.Sprintf("trace-agent %s", conf.AgentVersion)) + req.Header.Set("Via", "trace-agent "+conf.AgentVersion) if _, ok := req.Header["User-Agent"]; !ok { // explicitly disable User-Agent so it's not set to the default value // that net/http gives it: Go-http-client/1.1 diff --git a/pkg/trace/api/responses.go b/pkg/trace/api/responses.go index f735e0674b3e8c..156639afb1de67 100644 --- a/pkg/trace/api/responses.go +++ b/pkg/trace/api/responses.go @@ -8,7 +8,6 @@ package api import ( "context" "encoding/json" - "fmt" "io" "net" "net/http" @@ -66,7 +65,7 @@ func httpDecodingError(err error, tags []string, w http.ResponseWriter, statsd s msg = errtag } - tags = append(tags, fmt.Sprintf("error:%s", errtag)) + tags = append(tags, 
"error:"+errtag) _ = statsd.Count(receiverErrorKey, 1, tags, 1) http.Error(w, msg, status) } diff --git a/pkg/trace/api/symdb_test.go b/pkg/trace/api/symdb_test.go index 0a8cf645e1e4e3..fbe4b3d832fd97 100644 --- a/pkg/trace/api/symdb_test.go +++ b/pkg/trace/api/symdb_test.go @@ -6,7 +6,6 @@ package api import ( - "fmt" "io" "net/http" "net/http/httptest" @@ -69,11 +68,11 @@ func TestSymDBProxyHandler(t *testing.T) { var numCalls atomic.Int32 srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { ddtags := req.Header.Get("X-Datadog-Additional-Tags") - assert.Equal(t, fmt.Sprintf("host:myhost,default_env:test,agent_version:v1,%s", extraTag), ddtags) + assert.Equal(t, "host:myhost,default_env:test,agent_version:v1,"+extraTag, ddtags) numCalls.Add(1) })) defer srv.Close() - req, err := http.NewRequest("POST", fmt.Sprintf("/some/path?ddtags=%s", extraTag), nil) + req, err := http.NewRequest("POST", "/some/path?ddtags="+extraTag, nil) assert.NoError(t, err) conf := getSymDBConf(srv.URL) receiver := newTestReceiverFromConfig(conf) diff --git a/pkg/trace/api/telemetry.go b/pkg/trace/api/telemetry.go index 23ffacf727ab30..800ef5ccf5f162 100644 --- a/pkg/trace/api/telemetry.go +++ b/pkg/trace/api/telemetry.go @@ -8,7 +8,6 @@ package api import ( "bytes" "context" - "fmt" "io" "net/http" "net/url" @@ -234,7 +233,7 @@ func writeEmptyJSON(w http.ResponseWriter, statusCode int) { } func (f *TelemetryForwarder) setRequestHeader(req *http.Request) { - req.Header.Set("Via", fmt.Sprintf("trace-agent %s", f.conf.AgentVersion)) + req.Header.Set("Via", "trace-agent "+f.conf.AgentVersion) if _, ok := req.Header["User-Agent"]; !ok { // explicitly disable User-Agent so it's not set to the default value // that net/http gives it: Go-http-client/1.1 @@ -334,7 +333,7 @@ func (f *TelemetryForwarder) forwardTelemetry(req forwardedRequest) { func (f *TelemetryForwarder) forwardTelemetryEndpoint(req *http.Request, endpoint *config.Endpoint) (*http.Response, 
error) { tags := []string{ - fmt.Sprintf("endpoint:%s", endpoint.Host), + "endpoint:" + endpoint.Host, } defer func(now time.Time) { _ = f.statsd.Timing("datadog.trace_agent.telemetry_proxy.roundtrip_ms", time.Since(now), tags, 1) diff --git a/pkg/trace/api/transports.go b/pkg/trace/api/transports.go index feee4425899e74..05b67607ae8303 100644 --- a/pkg/trace/api/transports.go +++ b/pkg/trace/api/transports.go @@ -38,11 +38,11 @@ func newMeasuringTransport(rt http.RoundTripper, prefix string, tags []string, s // RoundTrip makes an HTTP round trip measuring request count and timing. func (m *measuringTransport) RoundTrip(req *http.Request) (rres *http.Response, rerr error) { defer func(start time.Time) { - _ = m.statsd.Count(fmt.Sprintf("%s.proxy_request", m.prefix), 1, m.tags, 1) - _ = m.statsd.Timing(fmt.Sprintf("%s.proxy_request_duration_ms", m.prefix), time.Since(start), m.tags, 1) + _ = m.statsd.Count(m.prefix+".proxy_request", 1, m.tags, 1) + _ = m.statsd.Timing(m.prefix+".proxy_request_duration_ms", time.Since(start), m.tags, 1) if rerr != nil { - tags := append(m.tags, fmt.Sprintf("error:%s", fmt.Sprintf("%T", rerr))) - _ = m.statsd.Count(fmt.Sprintf("%s.proxy_request_error", m.prefix), 1, tags, 1) + tags := append(m.tags, "error:"+fmt.Sprintf("%T", rerr)) + _ = m.statsd.Count(m.prefix+".proxy_request_error", 1, tags, 1) } }(time.Now()) return m.rt.RoundTrip(req) diff --git a/pkg/trace/remoteconfighandler/remote_config_handler.go b/pkg/trace/remoteconfighandler/remote_config_handler.go index 6f9f51d11e0f48..5db90e2b30cbe4 100644 --- a/pkg/trace/remoteconfighandler/remote_config_handler.go +++ b/pkg/trace/remoteconfighandler/remote_config_handler.go @@ -78,9 +78,7 @@ func New(conf *config.AgentConfig, prioritySampler prioritySampler, rareSampler TLSClientConfig: conf.IPCTLSClientConfig, }, }, - configSetEndpointFormatString: fmt.Sprintf( - "https://127.0.0.1:%s/config/set?log_level=%%s", strconv.Itoa(conf.DebugServerPort), - ), + 
configSetEndpointFormatString: "https://127.0.0.1:" + strconv.Itoa(conf.DebugServerPort) + "/config/set?log_level=%s", } } diff --git a/pkg/trace/sampler/scoresampler_test.go b/pkg/trace/sampler/scoresampler_test.go index 1b7658bde8d037..adfac40540f2c0 100644 --- a/pkg/trace/sampler/scoresampler_test.go +++ b/pkg/trace/sampler/scoresampler_test.go @@ -6,7 +6,6 @@ package sampler import ( - "fmt" "math/rand" "strconv" "testing" @@ -153,7 +152,7 @@ func BenchmarkSampler(b *testing.B) { for i := 0; i < b.N; i++ { trace := pb.Trace{ - &pb.Span{TraceID: 1, SpanID: 1, ParentID: 0, Start: 42, Duration: 1000000000, Service: "mcnulty", Type: "web", Resource: fmt.Sprint(rand.Intn(signatureCount))}, + &pb.Span{TraceID: 1, SpanID: 1, ParentID: 0, Start: 42, Duration: 1000000000, Service: "mcnulty", Type: "web", Resource: strconv.Itoa(rand.Intn(signatureCount))}, &pb.Span{TraceID: 1, SpanID: 2, ParentID: 1, Start: 100, Duration: 200000000, Service: "mcnulty", Type: "sql"}, &pb.Span{TraceID: 1, SpanID: 3, ParentID: 2, Start: 150, Duration: 199999000, Service: "master-db", Type: "sql"}, &pb.Span{TraceID: 1, SpanID: 4, ParentID: 1, Start: 500000000, Duration: 500000, Service: "redis", Type: "redis"}, diff --git a/pkg/trace/stats/statsraw_test.go b/pkg/trace/stats/statsraw_test.go index f805be5d8a5348..d392e2adb1d80c 100644 --- a/pkg/trace/stats/statsraw_test.go +++ b/pkg/trace/stats/statsraw_test.go @@ -7,6 +7,7 @@ package stats import ( "fmt" + "strconv" "testing" "time" @@ -68,7 +69,7 @@ func TestGrainWithPeerTags(t *testing.T) { }) t.Run("computeBySpanKind config", func(t *testing.T) { for _, spanKindEnabled := range []bool{true, false} { - t.Run(fmt.Sprintf("%t", spanKindEnabled), func(t *testing.T) { + t.Run(strconv.FormatBool(spanKindEnabled), func(t *testing.T) { assert := assert.New(t) sci := NewSpanConcentrator(&SpanConcentratorConfig{ ComputeStatsBySpanKind: spanKindEnabled, diff --git a/pkg/trace/telemetry/collector.go b/pkg/trace/telemetry/collector.go index 
aaa0b387df7d48..fd2389df67c45d 100644 --- a/pkg/trace/telemetry/collector.go +++ b/pkg/trace/telemetry/collector.go @@ -9,6 +9,7 @@ package telemetry import ( "bytes" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -70,7 +71,7 @@ type OnboardingEventTags struct { Env string `json:"env,omitempty"` } -var errReceivedUnsuccessfulStatusCode = fmt.Errorf("received a 4XX or 5xx error code while submitting telemetry data") +var errReceivedUnsuccessfulStatusCode = errors.New("received a 4XX or 5xx error code while submitting telemetry data") // OnboardingEventError ... type OnboardingEventError struct { diff --git a/pkg/trace/telemetry/collector_test.go b/pkg/trace/telemetry/collector_test.go index 8aa201c314b530..9099cb64ab17a9 100644 --- a/pkg/trace/telemetry/collector_test.go +++ b/pkg/trace/telemetry/collector_test.go @@ -7,6 +7,7 @@ package telemetry import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -115,7 +116,7 @@ func TestTelemetryDisabled(t *testing.T) { server.assertReq = func(_ *http.Request) { t.Fail() } - collector.SendStartupError(GenericError, fmt.Errorf("")) + collector.SendStartupError(GenericError, errors.New("")) collector.SendStartupSuccess() } @@ -134,7 +135,7 @@ func TestTelemetryPath(t *testing.T) { path = req.URL.Path } - collector.SendStartupError(GenericError, fmt.Errorf("")) + collector.SendStartupError(GenericError, errors.New("")) collector.SendStartupSuccess() assert.Equal(t, 1, reqCount) @@ -150,7 +151,7 @@ func TestNoSuccessAfterError(t *testing.T) { receiver := newOnboardingEventReceiver(server, t) - collector.SendStartupError(GenericError, fmt.Errorf("")) + collector.SendStartupError(GenericError, errors.New("")) collector.SendStartupSuccess() events := receiver.waitN(1, t) @@ -167,7 +168,7 @@ func TestErrorAfterSuccess(t *testing.T) { receiver := newOnboardingEventReceiver(server, t) collector.SendStartupSuccess() - collector.SendStartupError(GenericError, fmt.Errorf("")) + collector.SendStartupError(GenericError, 
errors.New("")) events := receiver.waitN(2, t) assert.Equal(t, "agent.startup.success", events[0].Payload.EventName) diff --git a/pkg/trace/timing/timing_test.go b/pkg/trace/timing/timing_test.go index b06d9dcd28eb09..135caf1598ada5 100644 --- a/pkg/trace/timing/timing_test.go +++ b/pkg/trace/timing/timing_test.go @@ -6,8 +6,8 @@ package timing import ( - "fmt" "math/rand" + "strconv" "sync" "testing" "time" @@ -63,7 +63,7 @@ func TestTiming(t *testing.T) { go func() { defer wg.Done() set.Since("counter1", time.Now().Add(-time.Second)) - set.Since(fmt.Sprintf("%d", rand.Int()), time.Now().Add(-time.Second)) + set.Since(strconv.Itoa(rand.Int()), time.Now().Add(-time.Second)) }() } for i := 0; i < 150; i++ { diff --git a/pkg/trace/transform/transform.go b/pkg/trace/transform/transform.go index a3b05fdb182628..393fda2e45b41e 100644 --- a/pkg/trace/transform/transform.go +++ b/pkg/trace/transform/transform.go @@ -182,7 +182,7 @@ func GetDDKeyForOTLPAttribute(k string) string { case found: break case strings.HasPrefix(k, "http.request.header."): - mappedKey = fmt.Sprintf("http.request.headers.%s", strings.TrimPrefix(k, "http.request.header.")) + mappedKey = "http.request.headers." 
+ strings.TrimPrefix(k, "http.request.header.") case !isDatadogAPMConventionKey(k): mappedKey = k default: diff --git a/pkg/trace/writer/trace_test.go b/pkg/trace/writer/trace_test.go index 24ebc2d454c5ee..4ea6aae63ac06e 100644 --- a/pkg/trace/writer/trace_test.go +++ b/pkg/trace/writer/trace_test.go @@ -6,7 +6,6 @@ package writer import ( - "fmt" "io" "net/http" "net/http/httptest" @@ -62,7 +61,7 @@ func TestTraceWriter(t *testing.T) { {zstd.NewComponent()}, } for _, tc := range testCases { - t.Run(fmt.Sprintf("encoding:%s", tc.compressor.Encoding()), func(t *testing.T) { + t.Run("encoding:"+tc.compressor.Encoding(), func(t *testing.T) { srv := newTestServer() defer srv.Close() cfg := &config.AgentConfig{ diff --git a/pkg/trace/writer/tracev1_test.go b/pkg/trace/writer/tracev1_test.go index 399dd9529d2f09..5da9f2adb06657 100644 --- a/pkg/trace/writer/tracev1_test.go +++ b/pkg/trace/writer/tracev1_test.go @@ -7,7 +7,6 @@ package writer import ( "bytes" - "fmt" "io" "net/url" "testing" @@ -33,7 +32,7 @@ func TestTraceWriterV1(t *testing.T) { {zstd.NewComponent()}, } for _, tc := range testCases { - t.Run(fmt.Sprintf("encoding:%s", tc.compressor.Encoding()), func(t *testing.T) { + t.Run("encoding:"+tc.compressor.Encoding(), func(t *testing.T) { srv := newTestServer() defer srv.Close() cfg := &config.AgentConfig{ diff --git a/pkg/util/cachedfetch/fetcher_test.go b/pkg/util/cachedfetch/fetcher_test.go index 56ff9f8645d0e5..d39165e29f90a7 100644 --- a/pkg/util/cachedfetch/fetcher_test.go +++ b/pkg/util/cachedfetch/fetcher_test.go @@ -7,6 +7,7 @@ package cachedfetch import ( "context" + "errors" "fmt" "testing" @@ -16,7 +17,7 @@ import ( // If Attempt never succeeds, f.Fetch returns an error func TestFetcherNeverSucceeds(t *testing.T) { f := Fetcher{ - Attempt: func(context.Context) (interface{}, error) { return nil, fmt.Errorf("uhoh") }, + Attempt: func(context.Context) (interface{}, error) { return nil, errors.New("uhoh") }, } v, err := f.Fetch(context.TODO()) @@ -55,7 
+56,7 @@ func TestFetcherUsesCachedValue(t *testing.T) { Attempt: func(context.Context) (interface{}, error) { count++ if count%2 == 0 { - return nil, fmt.Errorf("uhoh") + return nil, errors.New("uhoh") } return count, nil }, @@ -76,7 +77,7 @@ func TestFetcherLogsWhenUsingCached(t *testing.T) { Attempt: func(context.Context) (interface{}, error) { count++ if count%2 == 0 { - return nil, fmt.Errorf("uhoh") + return nil, errors.New("uhoh") } return count, nil }, @@ -107,7 +108,7 @@ func TestFetchString(t *testing.T) { // FetchString casts to a string func TestFetchStringError(t *testing.T) { f := Fetcher{ - Attempt: func(context.Context) (interface{}, error) { return nil, fmt.Errorf("uhoh") }, + Attempt: func(context.Context) (interface{}, error) { return nil, errors.New("uhoh") }, } v, err := f.FetchString(context.TODO()) require.Equal(t, "", v) @@ -127,7 +128,7 @@ func TestFetchStringSlice(t *testing.T) { // FetchStringSlice casts to a []string func TestFetchStringSliceError(t *testing.T) { f := Fetcher{ - Attempt: func(context.Context) (interface{}, error) { return nil, fmt.Errorf("uhoh") }, + Attempt: func(context.Context) (interface{}, error) { return nil, errors.New("uhoh") }, } v, err := f.FetchStringSlice(context.TODO()) require.Nil(t, v) @@ -136,7 +137,7 @@ func TestFetchStringSliceError(t *testing.T) { func TestReset(t *testing.T) { succeed := func(context.Context) (interface{}, error) { return "yay", nil } - fail := func(context.Context) (interface{}, error) { return nil, fmt.Errorf("uhoh") } + fail := func(context.Context) (interface{}, error) { return nil, errors.New("uhoh") } f := Fetcher{} f.Attempt = succeed diff --git a/pkg/util/cgroups/file_for_test.go b/pkg/util/cgroups/file_for_test.go index 328ee2c23fe2d2..80295dcf3801bb 100644 --- a/pkg/util/cgroups/file_for_test.go +++ b/pkg/util/cgroups/file_for_test.go @@ -24,7 +24,7 @@ func containerCgroupKubePod(systemd bool) string { if systemd { return 
fmt.Sprintf("kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3922967_14e1_4867_9388_461bac94b37e.slice/crio-%s.scope", cID) } - return fmt.Sprintf("kubepods/kubepods-besteffort/kubepods-besteffort-podb3922967_14e1_4867_9388_461bac94b37e/%s", cID) + return "kubepods/kubepods-besteffort/kubepods-besteffort-podb3922967_14e1_4867_9388_461bac94b37e/" + cID } type memoryFile struct { diff --git a/pkg/util/cloudproviders/alibaba/alibaba.go b/pkg/util/cloudproviders/alibaba/alibaba.go index 9ad502735e433c..b8029ae1aeaeed 100644 --- a/pkg/util/cloudproviders/alibaba/alibaba.go +++ b/pkg/util/cloudproviders/alibaba/alibaba.go @@ -8,6 +8,7 @@ package alibaba import ( "context" + "errors" "fmt" "time" @@ -38,7 +39,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Alibaba InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return nil, fmt.Errorf("cloud provider is disabled by configuration") + return nil, errors.New("cloud provider is disabled by configuration") } endpoint := metadataURL + "/latest/meta-data/instance-id" diff --git a/pkg/util/cloudproviders/azure/azure.go b/pkg/util/cloudproviders/azure/azure.go index ff1ab63dd8e5c3..8c6bcea9ce07ba 100644 --- a/pkg/util/cloudproviders/azure/azure.go +++ b/pkg/util/cloudproviders/azure/azure.go @@ -9,6 +9,7 @@ package azure import ( "context" "encoding/json" + "errors" "fmt" "strings" "time" @@ -125,7 +126,7 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in func getResponse(ctx context.Context, url string) (string, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } timeout := time.Duration(pkgconfigsetup.Datadog().GetInt("azure_metadata_timeout")) * time.Millisecond @@ -153,7 +154,7 
@@ func getHostnameWithConfig(ctx context.Context, config model.Config) (string, er style := config.GetString(hostnameStyleSetting) if style == "os" { - return "", fmt.Errorf("azure_hostname_style is set to 'os'") + return "", errors.New("azure_hostname_style is set to 'os'") } metadataJSON, err := instanceMetaFetcher.FetchString(ctx) diff --git a/pkg/util/cloudproviders/cloudfoundry/bbscache.go b/pkg/util/cloudproviders/cloudfoundry/bbscache.go index 7efa7f60801daa..2fb473d0deb8ae 100644 --- a/pkg/util/cloudproviders/cloudfoundry/bbscache.go +++ b/pkg/util/cloudproviders/cloudfoundry/bbscache.go @@ -10,6 +10,7 @@ package cloudfoundry import ( "context" + "errors" "fmt" "regexp" "sync" @@ -122,7 +123,7 @@ func ConfigureGlobalBBSCache(ctx context.Context, bbsURL, cafile, certfile, keyf // GetGlobalBBSCache returns the global instance of BBSCache (or error if the instance is not configured yet) func GetGlobalBBSCache() (*BBSCache, error) { if !globalBBSCache.configured { - return nil, fmt.Errorf("global BBS Cache not configured") + return nil, errors.New("global BBS Cache not configured") } return globalBBSCache, nil } diff --git a/pkg/util/cloudproviders/cloudfoundry/cccache.go b/pkg/util/cloudproviders/cloudfoundry/cccache.go index 79325c80d02fc4..33bf45e9f81810 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cccache.go +++ b/pkg/util/cloudproviders/cloudfoundry/cccache.go @@ -9,8 +9,10 @@ package cloudfoundry import ( "context" + "errors" "fmt" "net/url" + "strconv" "strings" "sync" "time" @@ -159,7 +161,7 @@ func GetGlobalCCCache() (*CCCache, error) { globalCCCache.Lock() defer globalCCCache.Unlock() if !globalCCCache.configured { - return nil, fmt.Errorf("global CC Cache not configured") + return nil, errors.New("global CC Cache not configured") } return globalCCCache, nil } @@ -209,7 +211,7 @@ func getResource[T any](ccc *CCCache, resourceName, guid string, cache map[strin ccc.RUnlock() if !updatedOnce { - return resource, fmt.Errorf("cannot refresh cache on 
miss, cccache is still warming up") + return resource, errors.New("cannot refresh cache on miss, cccache is still warming up") } resourceLock := ccc.getLockForResource(resourceName, guid) @@ -368,7 +370,7 @@ func (ccc *CCCache) GetIsolationSegmentForOrg(guid string) (*cfclient.IsolationS func (ccc *CCCache) fetchProcessesByAppGUID(appGUID string) ([]*cfclient.Process, error) { query := url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) // fetch processes from the CAPI processes, err := ccc.ccAPIClient.ListProcessByAppGUID(query, appGUID) @@ -445,7 +447,7 @@ func (ccc *CCCache) listApplications(wg *sync.WaitGroup, appsMap *map[string]*cf go func() { defer wg.Done() query := url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) apps, err = ccc.ccAPIClient.ListV3AppsByQuery(query) if err != nil { log.Errorf("Failed listing apps from cloud controller: %v", err) @@ -484,7 +486,7 @@ func (ccc *CCCache) listSpaces(wg *sync.WaitGroup, spacesMap *map[string]*cfclie go func() { defer wg.Done() query := url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) spaces, err := ccc.ccAPIClient.ListV3SpacesByQuery(query) if err != nil { log.Errorf("Failed listing spaces from cloud controller: %v", err) @@ -503,7 +505,7 @@ func (ccc *CCCache) listOrgs(wg *sync.WaitGroup, orgsMap *map[string]*cfclient.V go func() { defer wg.Done() query := url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) orgs, err := ccc.ccAPIClient.ListV3OrganizationsByQuery(query) if err != nil { log.Errorf("Failed listing orgs from cloud controller: %v", err) @@ -522,7 +524,7 @@ func (ccc *CCCache) listOrgQuotas(wg *sync.WaitGroup, orgQuotasMap *map[string]* go func() { defer wg.Done() query := 
url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) orgQuotas, err := ccc.ccAPIClient.ListOrgQuotasByQuery(query) if err != nil { log.Errorf("Failed listing org quotas from cloud controller: %v", err) @@ -544,7 +546,7 @@ func (ccc *CCCache) listProcesses(wg *sync.WaitGroup, processesMap *map[string][ go func() { defer wg.Done() query := url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) processes, err := ccc.ccAPIClient.ListAllProcessesByQuery(query) if err != nil { log.Errorf("Failed listing processes from cloud controller: %v", err) @@ -572,7 +574,7 @@ func (ccc *CCCache) listIsolationSegments(wg *sync.WaitGroup, segmentBySpaceGUID go func() { defer wg.Done() query := url.Values{} - query.Add("per_page", fmt.Sprintf("%d", ccc.appsBatchSize)) + query.Add("per_page", strconv.Itoa(ccc.appsBatchSize)) segments, err := ccc.ccAPIClient.ListIsolationSegmentsByQuery(query) if err != nil { log.Errorf("Failed listing isolation segments from cloud controller: %v", err) diff --git a/pkg/util/cloudproviders/cloudfoundry/types_test.go b/pkg/util/cloudproviders/cloudfoundry/types_test.go index 7ef1c549da5d80..1c81547516db79 100644 --- a/pkg/util/cloudproviders/cloudfoundry/types_test.go +++ b/pkg/util/cloudproviders/cloudfoundry/types_test.go @@ -8,7 +8,6 @@ package cloudfoundry import ( - "fmt" "regexp" "testing" @@ -381,7 +380,7 @@ func TestADIdentifier(t *testing.T) { expected: "4321/flask-app/instance-guid", }, } { - t.Run(fmt.Sprintf("svcName=%s", tc.svcName), func(t *testing.T) { + t.Run("svcName="+tc.svcName, func(t *testing.T) { var i ADIdentifier if tc.aLRP == nil { i = NewADNonContainerIdentifier(tc.dLRP, tc.svcName) diff --git a/pkg/util/cloudproviders/cloudproviders_test.go b/pkg/util/cloudproviders/cloudproviders_test.go index d2a26fa85a59e7..384a7cea5accd2 100644 --- 
a/pkg/util/cloudproviders/cloudproviders_test.go +++ b/pkg/util/cloudproviders/cloudproviders_test.go @@ -7,7 +7,7 @@ package cloudproviders import ( "context" - "fmt" + "errors" "strings" "testing" @@ -39,7 +39,7 @@ func TestCloudProviderAliases(t *testing.T) { isCloudEnv: true, callback: func(_ context.Context) ([]string, error) { detector2Called = true - return nil, fmt.Errorf("error from detector2") + return nil, errors.New("error from detector2") }, }, { diff --git a/pkg/util/cloudproviders/gce/gce.go b/pkg/util/cloudproviders/gce/gce.go index afbbbefa6b87a8..a684e9f5e62043 100644 --- a/pkg/util/cloudproviders/gce/gce.go +++ b/pkg/util/cloudproviders/gce/gce.go @@ -7,6 +7,7 @@ package gce import ( "context" + "errors" "fmt" "strings" "time" @@ -179,11 +180,11 @@ var networkIDFetcher = cachedfetch.Fetcher{ switch len(vpcIDs) { case 0: - return "", fmt.Errorf("zero network interfaces detected") + return "", errors.New("zero network interfaces detected") case 1: return vpcIDs.GetAll()[0], nil default: - return "", fmt.Errorf("more than one network interface detected, cannot get network ID") + return "", errors.New("more than one network interface detected, cannot get network ID") } }, } @@ -272,7 +273,7 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in func getResponse(ctx context.Context, url string) (string, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, pkgconfigsetup.Datadog().GetDuration("gce_metadata_timeout")*time.Millisecond, pkgconfigsetup.Datadog()) @@ -282,7 +283,7 @@ func getResponse(ctx context.Context, url string) (string, error) { // Some cloud platforms will respond with an empty body, causing the agent to assume a faulty hostname if len(res) 
<= 0 { - return "", fmt.Errorf("empty response body") + return "", errors.New("empty response body") } return res, nil diff --git a/pkg/util/cloudproviders/gce/gce_tags.go b/pkg/util/cloudproviders/gce/gce_tags.go index bebd86483b7fd9..ab79309b0fb460 100644 --- a/pkg/util/cloudproviders/gce/gce_tags.go +++ b/pkg/util/cloudproviders/gce/gce_tags.go @@ -8,6 +8,7 @@ package gce import ( "context" "encoding/json" + "errors" "fmt" "slices" "strings" @@ -53,7 +54,7 @@ func getCachedTags(err error) ([]string, error) { func GetTags(ctx context.Context) ([]string, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return nil, fmt.Errorf("cloud provider is disabled by configuration") + return nil, errors.New("cloud provider is disabled by configuration") } metadataResponse, err := getResponse(ctx, metadataURL+"/?recursive=true") @@ -71,22 +72,22 @@ func GetTags(ctx context.Context) ([]string, error) { tags := metadata.Instance.Tags if metadata.Instance.Zone != "" { ts := strings.Split(metadata.Instance.Zone, "/") - tags = append(tags, fmt.Sprintf("zone:%s", ts[len(ts)-1])) + tags = append(tags, "zone:"+ts[len(ts)-1]) } if metadata.Instance.MachineType != "" { ts := strings.Split(metadata.Instance.MachineType, "/") - tags = append(tags, fmt.Sprintf("instance-type:%s", ts[len(ts)-1])) + tags = append(tags, "instance-type:"+ts[len(ts)-1]) } if metadata.Instance.Hostname != "" { - tags = append(tags, fmt.Sprintf("internal-hostname:%s", metadata.Instance.Hostname)) + tags = append(tags, "internal-hostname:"+metadata.Instance.Hostname) } if metadata.Instance.ID != 0 { tags = append(tags, fmt.Sprintf("instance-id:%d", metadata.Instance.ID)) } if metadata.Project.ProjectID != "" { - tags = append(tags, fmt.Sprintf("project:%s", metadata.Project.ProjectID)) + tags = append(tags, "project:"+metadata.Project.ProjectID) if pkgconfigsetup.Datadog().GetBool("gce_send_project_id_tag") { - tags = append(tags, fmt.Sprintf("project_id:%s", 
metadata.Project.ProjectID)) + tags = append(tags, "project_id:"+metadata.Project.ProjectID) } } if metadata.Project.NumericProjectID != 0 { diff --git a/pkg/util/cloudproviders/ibm/ibm.go b/pkg/util/cloudproviders/ibm/ibm.go index dfdf193864051a..dff2a7dac6cc76 100644 --- a/pkg/util/cloudproviders/ibm/ibm.go +++ b/pkg/util/cloudproviders/ibm/ibm.go @@ -8,6 +8,7 @@ package ibm import ( "context" "encoding/json" + "errors" "fmt" "time" @@ -61,7 +62,7 @@ func getToken(ctx context.Context) (string, time.Time, error) { if err != nil { return "", time.Time{}, fmt.Errorf("could not Unmarshal IBM token answer: %s", err) } else if data.Value == "" { - return "", time.Time{}, fmt.Errorf("empty token returned by token API") + return "", time.Time{}, errors.New("empty token returned by token API") } expiresAt, err := time.Parse(time.RFC3339, data.ExpiresAt) @@ -84,7 +85,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ Name: "IBM instance name", Attempt: func(ctx context.Context) (interface{}, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("IBM cloud provider is disabled by configuration") + return "", errors.New("IBM cloud provider is disabled by configuration") } t, err := token.Get(ctx) @@ -95,7 +96,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ res, err := httputils.Get(ctx, metadataURL+instanceEndpoint, map[string]string{ - "Authorization": fmt.Sprintf("Bearer %s", t), + "Authorization": "Bearer " + t, }, pkgconfigsetup.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, pkgconfigsetup.Datadog()) if err != nil { @@ -116,7 +117,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ } if data.ID == "" { - return nil, fmt.Errorf("IBM cloud metdata endpoint returned an empty 'id'") + return nil, errors.New("IBM cloud metdata endpoint returned an empty 'id'") } return []string{data.ID}, nil diff --git a/pkg/util/cloudproviders/oracle/oracle.go b/pkg/util/cloudproviders/oracle/oracle.go index 
22c6c4557a32a4..d1f342de962abf 100644 --- a/pkg/util/cloudproviders/oracle/oracle.go +++ b/pkg/util/cloudproviders/oracle/oracle.go @@ -7,6 +7,7 @@ package oracle import ( "context" + "errors" "fmt" "time" @@ -71,7 +72,7 @@ func GetNTPHosts(ctx context.Context) []string { func getResponse(ctx context.Context, url string) (string, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } res, err := httputils.Get(ctx, url, map[string]string{"Authorization": "Bearer Oracle"}, timeout, pkgconfigsetup.Datadog()) diff --git a/pkg/util/cloudproviders/tencent/tencent.go b/pkg/util/cloudproviders/tencent/tencent.go index 7cbb0c54c274f2..029227e15b9f68 100644 --- a/pkg/util/cloudproviders/tencent/tencent.go +++ b/pkg/util/cloudproviders/tencent/tencent.go @@ -7,6 +7,7 @@ package tencent import ( "context" + "errors" "fmt" "time" @@ -81,7 +82,7 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, maxLengt func getMetadataItem(ctx context.Context, endpoint string) (string, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } res, err := httputils.Get(ctx, endpoint, nil, timeout, pkgconfigsetup.Datadog()) diff --git a/pkg/util/clusteragent/clcrunner.go b/pkg/util/clusteragent/clcrunner.go index 4537d1dbffe0e2..2b925c0ff0cf9e 100644 --- a/pkg/util/clusteragent/clcrunner.go +++ b/pkg/util/clusteragent/clcrunner.go @@ -66,7 +66,7 @@ func (c *CLCRunnerClient) init() { // Set headers c.clcRunnerAPIRequestHeaders = http.Header{} - c.clcRunnerAPIRequestHeaders.Set(authorizationHeaderKey, fmt.Sprintf("Bearer %s", authToken)) + c.clcRunnerAPIRequestHeaders.Set(authorizationHeaderKey, "Bearer "+authToken) 
// Set TLS config crossNodeClientTLSConfig, err := pkgapiutil.GetCrossNodeClientTLSConfig() diff --git a/pkg/util/clusteragent/clcrunner_test.go b/pkg/util/clusteragent/clcrunner_test.go index a6c7811366501f..9eaa83b34a5ac3 100644 --- a/pkg/util/clusteragent/clcrunner_test.go +++ b/pkg/util/clusteragent/clcrunner_test.go @@ -68,7 +68,7 @@ func (d *dummyCLCRunner) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) return } - if token != fmt.Sprintf("Bearer %s", d.token) { + if token != "Bearer "+d.token { log.Errorf("wrong token %s", token) w.WriteHeader(http.StatusForbidden) return diff --git a/pkg/util/clusteragent/clusteragent.go b/pkg/util/clusteragent/clusteragent.go index 318f4e25913227..289f2f46a81bd5 100644 --- a/pkg/util/clusteragent/clusteragent.go +++ b/pkg/util/clusteragent/clusteragent.go @@ -15,6 +15,7 @@ import ( "net" "net/http" "net/url" + "strings" "sync" "time" @@ -132,7 +133,7 @@ func (c *DCAClient) init() error { } c.clusterAgentAPIRequestHeaders = http.Header{} - c.clusterAgentAPIRequestHeaders.Set(authorizationHeaderKey, fmt.Sprintf("Bearer %s", authToken)) + c.clusterAgentAPIRequestHeaders.Set(authorizationHeaderKey, "Bearer "+authToken) podIP := pkgconfigsetup.Datadog().GetString("clc_runner_host") c.clusterAgentAPIRequestHeaders.Set(RealIPHeader, podIP) @@ -384,7 +385,7 @@ func (c *DCAClient) GetNamespaceMetadata(nsName string) (*Metadata, error) { func (c *DCAClient) GetNodeAnnotations(nodeName string, filter ...string) (map[string]string, error) { var result map[string]string - base := fmt.Sprintf("api/v1/annotations/node/%s", nodeName) + base := "api/v1/annotations/node/" + nodeName path, err := buildQueryList(base, "filter", filter) if err != nil { return result, err @@ -484,13 +485,18 @@ func buildQueryList(path string, key string, list []string) (string, error) { encodedKey := url.QueryEscape(key) + var builder strings.Builder + builder.WriteString(path) for i, val := range list { encodedVal := 
url.QueryEscape(val) if i == 0 { - path = path + fmt.Sprintf("?%s=%s", encodedKey, encodedVal) // first parameter starts with a ? + builder.WriteString("?") // first parameter starts with a ? } else { - path = path + fmt.Sprintf("&%s=%s", encodedKey, encodedVal) // the rest start with & + builder.WriteString("&") // the rest start with & } + builder.WriteString(encodedKey) + builder.WriteString("=") + builder.WriteString(encodedVal) } - return path, nil + return builder.String(), nil } diff --git a/pkg/util/clusteragent/clusteragent_test.go b/pkg/util/clusteragent/clusteragent_test.go index 7e89c850011c2c..824c4691d40800 100644 --- a/pkg/util/clusteragent/clusteragent_test.go +++ b/pkg/util/clusteragent/clusteragent_test.go @@ -138,7 +138,7 @@ func (d *dummyClusterAgent) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) return } - if token != fmt.Sprintf("Bearer %s", d.token) { + if token != "Bearer "+d.token { log.Errorf("wrong token %s", token) w.WriteHeader(http.StatusForbidden) return @@ -220,7 +220,7 @@ func (d *dummyClusterAgent) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "node": switch s[3] { case "tags": - key := fmt.Sprintf("node/%s", nodeName) + key := "node/" + nodeName labels, found := d.nodeLabels[key] if found { b, err := json.Marshal(labels) @@ -232,7 +232,7 @@ func (d *dummyClusterAgent) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } case "annotations": - key := fmt.Sprintf("node/%s", nodeName) + key := "node/" + nodeName labels, found := d.nodeAnnotations[key] if found { b, err := json.Marshal(labels) @@ -244,7 +244,7 @@ func (d *dummyClusterAgent) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } case "uid": - key := fmt.Sprintf("node/%s", nodeName) + key := "node/" + nodeName uid, found := d.nodeUIDs[key] if found { uidResp := map[string]string{"uid": uid} diff --git a/pkg/util/common/common.go b/pkg/util/common/common.go index 2cff66dc700dcc..1c657d99db0d9b 100644 
--- a/pkg/util/common/common.go +++ b/pkg/util/common/common.go @@ -7,6 +7,7 @@ package common import ( + "errors" "fmt" "reflect" "strings" @@ -121,7 +122,7 @@ func GetSliceOfStringMap(slice []interface{}) ([]map[string]string, error) { for _, e := range slice { value, ok := e.(map[interface{}]interface{}) if !ok { - return nil, fmt.Errorf("unexpected type for slice value") + return nil, errors.New("unexpected type for slice value") } entry := map[string]string{} for k, v := range value { diff --git a/pkg/util/containerd/containerd_util_test.go b/pkg/util/containerd/containerd_util_test.go index 72c7f6bee0d65d..b8e29ddc2eead9 100644 --- a/pkg/util/containerd/containerd_util_test.go +++ b/pkg/util/containerd/containerd_util_test.go @@ -10,7 +10,7 @@ package containerd import ( "context" "encoding/json" - "fmt" + "errors" "testing" v1 "github.com/containerd/cgroups/v3/cgroup1/stats" @@ -227,7 +227,7 @@ func TestTaskMetrics(t *testing.T) { "io.containerd.cgroups.v1.Metric", v1.Metrics{}, "", - fmt.Errorf("no running task found"), + errors.New("no running task found"), &v1.Metrics{}, }, { @@ -235,7 +235,7 @@ func TestTaskMetrics(t *testing.T) { "io.containerd.cgroups.v1.Metric", v1.Metrics{}, "", - fmt.Errorf("no metrics received"), + errors.New("no metrics received"), &v1.Metrics{}, }, } diff --git a/pkg/util/containers/cri/util.go b/pkg/util/containers/cri/util.go index 5750663f899afc..6dd7723ce4f3e2 100644 --- a/pkg/util/containers/cri/util.go +++ b/pkg/util/containers/cri/util.go @@ -10,6 +10,7 @@ package cri import ( "context" + "errors" "fmt" "runtime" "sync" @@ -57,7 +58,7 @@ type CRIUtil struct { // This is not exposed as public API but is called by the retrier embed. 
func (c *CRIUtil) init() error { if c.socketPath == "" { - return fmt.Errorf("no cri_socket_path was set") + return errors.New("no cri_socket_path was set") } var protocol string diff --git a/pkg/util/containers/image/image_test.go b/pkg/util/containers/image/image_test.go index 50a6746771f2f2..fd5f9398d018e5 100644 --- a/pkg/util/containers/image/image_test.go +++ b/pkg/util/containers/image/image_test.go @@ -6,6 +6,7 @@ package image import ( + "errors" "fmt" "testing" @@ -22,9 +23,9 @@ func TestSplitImageName(t *testing.T) { err error }{ // Empty - {"", "", "", "", "", fmt.Errorf("empty image name")}, + {"", "", "", "", "", errors.New("empty image name")}, // A sha256 string - {"sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0", "", "", "", "", fmt.Errorf("invalid image name (is a sha256)")}, + {"sha256:5bef08742407efd622d243692b79ba0055383bbce12900324f75e56f589aedb0", "", "", "", "", errors.New("invalid image name (is a sha256)")}, // Shortest possibility {"alpine", "alpine", "", "alpine", "", nil}, // Historical docker format diff --git a/pkg/util/containers/metrics/mock/mock.go b/pkg/util/containers/metrics/mock/mock.go index e060c35535afdb..475f07c5760393 100644 --- a/pkg/util/containers/metrics/mock/mock.go +++ b/pkg/util/containers/metrics/mock/mock.go @@ -9,7 +9,7 @@ package mock import ( - "fmt" + "errors" "time" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" @@ -155,7 +155,7 @@ func (mp *Collector) GetContainerStats(_, containerID string, _ time.Duration) ( return entry.ContainerStats, entry.Error } - return nil, fmt.Errorf("container not found") + return nil, errors.New("container not found") } // GetContainerOpenFilesCount returns stats from MockContainerEntry @@ -164,7 +164,7 @@ func (mp *Collector) GetContainerOpenFilesCount(_, containerID string, _ time.Du return entry.OpenFiles, entry.Error } - return nil, fmt.Errorf("container not found") + return nil, errors.New("container not found") } // 
GetContainerNetworkStats returns stats from MockContainerEntry @@ -173,7 +173,7 @@ func (mp *Collector) GetContainerNetworkStats(_, containerID string, _ time.Dura return entry.NetworkStats, entry.Error } - return nil, fmt.Errorf("container not found") + return nil, errors.New("container not found") } // GetPIDs returns pids from MockContainerEntry @@ -182,7 +182,7 @@ func (mp *Collector) GetPIDs(_, containerID string, _ time.Duration) ([]int, err return entry.PIDs, entry.Error } - return nil, fmt.Errorf("container not found") + return nil, errors.New("container not found") } // GetContainerIDForPID returns a container ID for given PID. diff --git a/pkg/util/containers/metrics/provider/registry_test.go b/pkg/util/containers/metrics/provider/registry_test.go index 8873c3157cfcbe..6e8dc87ec09d83 100644 --- a/pkg/util/containers/metrics/provider/registry_test.go +++ b/pkg/util/containers/metrics/provider/registry_test.go @@ -6,7 +6,7 @@ package provider import ( - "fmt" + "errors" "testing" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -57,7 +57,7 @@ func TestCollectorRegistry(t *testing.T) { Constructor: func(*Cache, option.Option[workloadmeta.Component]) (CollectorMetadata, error) { if dummy3Retries < 2 { dummy3Retries++ - return CollectorMetadata{}, fmt.Errorf("not yet okay") + return CollectorMetadata{}, errors.New("not yet okay") } collector := dummyCollector{ diff --git a/pkg/util/containers/metrics/system/collector_linux.go b/pkg/util/containers/metrics/system/collector_linux.go index 9569cd830be519..9ded960ee0d56f 100644 --- a/pkg/util/containers/metrics/system/collector_linux.go +++ b/pkg/util/containers/metrics/system/collector_linux.go @@ -261,11 +261,11 @@ func (c *systemCollector) GetSelfContainerID() (string, error) { // controller. The `reader` must use a `cgroups.ContainerFilter`. 
func (c *systemCollector) getSelfContainerIDFromInode() (string, error) { if c.selfReader == nil { - return "", fmt.Errorf("self reader is not initialized") + return "", errors.New("self reader is not initialized") } selfCgroup := c.selfReader.GetCgroup(cgroups.SelfCgroupIdentifier) if selfCgroup == nil { - return "", fmt.Errorf("unable to get self cgroup") + return "", errors.New("unable to get self cgroup") } return c.GetContainerIDForInode(selfCgroup.Inode(), 0) @@ -281,7 +281,7 @@ func (c *systemCollector) getCgroup(containerID string, cacheValidity time.Durat cg = c.reader.GetCgroup(containerID) if cg == nil { - return nil, fmt.Errorf("containerID not found") + return nil, errors.New("containerID not found") } } diff --git a/pkg/util/containers/metrics/system/collector_network_linux.go b/pkg/util/containers/metrics/system/collector_network_linux.go index a994cd7513b7f1..d3a051cf57e269 100644 --- a/pkg/util/containers/metrics/system/collector_network_linux.go +++ b/pkg/util/containers/metrics/system/collector_network_linux.go @@ -36,7 +36,7 @@ func buildNetworkStats(procPath string, pids []int) (*provider.ContainerNetworkS } } - return nil, fmt.Errorf("no process found inside this cgroup, impossible to gather network stats") + return nil, errors.New("no process found inside this cgroup, impossible to gather network stats") } // collectNetworkStats retrieves the network statistics for a given pid. 
diff --git a/pkg/util/coredump/core_windows.go b/pkg/util/coredump/core_windows.go index 3e7d3915f7636d..429fc67398b6d1 100644 --- a/pkg/util/coredump/core_windows.go +++ b/pkg/util/coredump/core_windows.go @@ -6,7 +6,7 @@ package coredump import ( - "fmt" + "errors" "github.com/DataDog/datadog-agent/pkg/config/model" ) @@ -14,7 +14,7 @@ import ( // Setup enables core dumps and sets the core dump size limit based on configuration func Setup(cfg model.Reader) error { if cfg.GetBool("go_core_dump") { - return fmt.Errorf("Not supported on Windows") + return errors.New("Not supported on Windows") } return nil } diff --git a/pkg/util/crio/crio_util.go b/pkg/util/crio/crio_util.go index 607f91be738224..9947c019647ec4 100644 --- a/pkg/util/crio/crio_util.go +++ b/pkg/util/crio/crio_util.go @@ -11,6 +11,7 @@ package crio import ( "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -225,7 +226,7 @@ func (c *clientImpl) connect() error { // Ensure connection is ready if conn.GetState() != connectivity.Ready { - return fmt.Errorf("connection not in READY state") + return errors.New("connection not in READY state") } return nil diff --git a/pkg/util/docker/docker_test.go b/pkg/util/docker/docker_test.go index eccb329e5fe116..e45e4dd29ec3a6 100644 --- a/pkg/util/docker/docker_test.go +++ b/pkg/util/docker/docker_test.go @@ -110,7 +110,7 @@ func TestResolveImageNameFromContainer(t *testing.T) { expectedImage: imageName, }, } { - t.Run(fmt.Sprintf("case %s", tc.name), func(*testing.T) { + t.Run("case "+tc.name, func(*testing.T) { result, err := globalDockerUtil.ResolveImageNameFromContainer(ctx, tc.input) assert.Equal(tc.expectedImage, result, "%s test failed; expected %s but got %s", tc.name, tc.expectedImage, result) assert.Nil(err, "%s test failed; expected nil error but got %s", tc.name, err) diff --git a/pkg/util/docker/docker_util.go b/pkg/util/docker/docker_util.go index b7f21ca82fdca1..84a5a67a2b661b 100644 --- a/pkg/util/docker/docker_util.go +++ 
b/pkg/util/docker/docker_util.go @@ -336,7 +336,7 @@ func (d *DockerUtil) InspectNoCache(ctx context.Context, id string, withSize boo container, _, err := d.cli.ContainerInspectWithRaw(ctx, id, withSize) if cerrdefs.IsNotFound(err) { - return container, dderrors.NewNotFound(fmt.Sprintf("docker container %s", id)) + return container, dderrors.NewNotFound("docker container " + id) } if err != nil { return container, err @@ -413,7 +413,7 @@ func (d *DockerUtil) GetContainerPIDs(ctx context.Context, containerID string) ( } } if pidIdx == -1 { - return nil, fmt.Errorf("unable to locate PID index into returned process slice") + return nil, errors.New("unable to locate PID index into returned process slice") } // Create slice large enough to hold each PID diff --git a/pkg/util/docker/storage.go b/pkg/util/docker/storage.go index 167b5b22256ccb..58755c6f3cf066 100644 --- a/pkg/util/docker/storage.go +++ b/pkg/util/docker/storage.go @@ -126,7 +126,7 @@ func parseStorageStatsFromInfo(info system.Info) ([]*StorageStats, error) { func parseDiskQuantity(text string) (uint64, error) { match := diskBytesRe.FindStringSubmatch(text) if match == nil { - return 0, fmt.Errorf("parsing error: invalid format") + return 0, errors.New("parsing error: invalid format") } multi, found := diskUnits[strings.ToLower(match[2])] if !found { diff --git a/pkg/util/docker/storage_test.go b/pkg/util/docker/storage_test.go index d352f6023c565d..414ab8c63b9cce 100644 --- a/pkg/util/docker/storage_test.go +++ b/pkg/util/docker/storage_test.go @@ -8,7 +8,7 @@ package docker import ( - "fmt" + "errors" "math" "testing" @@ -244,10 +244,10 @@ func TestParseDiskQuantity(t *testing.T) { {"521kb", 521000, nil}, {"0 MB", 0, nil}, // Unknown unit - {"10 AB", 0, fmt.Errorf("parsing error: unknown unit AB")}, + {"10 AB", 0, errors.New("parsing error: unknown unit AB")}, // Parsing error - {"10", 0, fmt.Errorf("parsing error: invalid format")}, - {"MB 10", 0, fmt.Errorf("parsing error: invalid format")}, + {"10", 
0, errors.New("parsing error: invalid format")}, + {"MB 10", 0, errors.New("parsing error: invalid format")}, } { t.Logf("test case %d", nb) val, err := parseDiskQuantity(tc.text) diff --git a/pkg/util/docker/util_docker.go b/pkg/util/docker/util_docker.go index bb1721be6bb6cb..ee7b9c928e8efd 100644 --- a/pkg/util/docker/util_docker.go +++ b/pkg/util/docker/util_docker.go @@ -8,6 +8,7 @@ package docker import ( + "errors" "fmt" "github.com/docker/docker/api/types/filters" @@ -20,7 +21,7 @@ import ( func buildDockerFilter(args ...string) (volume.ListOptions, error) { filter := filters.NewArgs() if len(args)%2 != 0 { - return volume.ListOptions{Filters: filter}, fmt.Errorf("an even number of arguments is required") + return volume.ListOptions{Filters: filter}, errors.New("an even number of arguments is required") } for i := 0; i < len(args); i += 2 { filter.Add(args[i], args[i+1]) diff --git a/pkg/util/ec2/dmi.go b/pkg/util/ec2/dmi.go index 953edc7dbe0dd8..8a1fe51310066b 100644 --- a/pkg/util/ec2/dmi.go +++ b/pkg/util/ec2/dmi.go @@ -6,6 +6,8 @@ package ec2 import ( + "encoding/hex" + "errors" "fmt" "strings" @@ -31,16 +33,16 @@ func isBoardVendorEC2() bool { func getInstanceIDFromDMI() (string, error) { // we don't want to collect anything in sidecar mode if fargate.IsSidecar() { - return "", fmt.Errorf("host alias detection through DMI is disabled in sidecar mode") + return "", errors.New("host alias detection through DMI is disabled in sidecar mode") } if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { - return "", fmt.Errorf("'ec2_use_dmi' is disabled") + return "", errors.New("'ec2_use_dmi' is disabled") } if !isBoardVendorEC2() { isEC2UUID() - return "", fmt.Errorf("board vendor is not AWS") + return "", errors.New("board vendor is not AWS") } boardAssetTag := dmi.GetBoardAssetTag() @@ -95,7 +97,7 @@ func isEC2UUID() bool { b[0] = byte(IDPart) b[1] = byte(IDPart >> 8) - swapID := fmt.Sprintf("%x", b) + swapID := hex.EncodeToString(b) if 
strings.HasPrefix(strings.ToLower(swapID), "ec2") { ec2internal.SetCloudProviderSource(ec2internal.MetadataSourceUUID) return true diff --git a/pkg/util/ec2/ec2_account_id.go b/pkg/util/ec2/ec2_account_id.go index 4c1fb8d25538d7..3262c70a60db5b 100644 --- a/pkg/util/ec2/ec2_account_id.go +++ b/pkg/util/ec2/ec2_account_id.go @@ -7,7 +7,7 @@ package ec2 import ( "context" - "fmt" + "errors" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" @@ -19,7 +19,7 @@ var accountIDFetcher = cachedfetch.Fetcher{ Name: "AWS Account ID", Attempt: func(ctx context.Context) (interface{}, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } ec2id, err := GetInstanceIdentity(ctx) diff --git a/pkg/util/ec2/ec2_test.go b/pkg/util/ec2/ec2_test.go index 9c6e9451db6b43..db5974b8ef90bd 100644 --- a/pkg/util/ec2/ec2_test.go +++ b/pkg/util/ec2/ec2_test.go @@ -7,10 +7,10 @@ package ec2 import ( "context" - "fmt" "io" "net/http" "net/http/httptest" + "strconv" "testing" "time" @@ -389,7 +389,7 @@ func TestMetedataRequestWithToken(t *testing.T) { if h == "" { w.WriteHeader(http.StatusUnauthorized) } - r.Header.Add("X-sequence", fmt.Sprintf("%v", seq)) + r.Header.Add("X-sequence", strconv.Itoa(seq)) seq++ requestForToken = r io.WriteString(w, testIMDSToken) @@ -397,7 +397,7 @@ func TestMetedataRequestWithToken(t *testing.T) { // Should be a metadata request t := r.Header.Get("X-aws-ec2-metadata-token") if t != testIMDSToken { - r.Header.Add("X-sequence", fmt.Sprintf("%v", seq)) + r.Header.Add("X-sequence", strconv.Itoa(seq)) seq++ requestWithoutToken = r w.WriteHeader(http.StatusUnauthorized) @@ -405,7 +405,7 @@ func TestMetedataRequestWithToken(t *testing.T) { } switch r.RequestURI { case "/public-ipv4": - r.Header.Add("X-sequence", 
fmt.Sprintf("%v", seq)) + r.Header.Add("X-sequence", strconv.Itoa(seq)) seq++ requestWithToken = r io.WriteString(w, ipv4) @@ -433,7 +433,7 @@ func TestMetedataRequestWithToken(t *testing.T) { assert.Equal(t, "0", requestForToken.Header.Get("X-sequence")) assert.Equal(t, "1", requestWithToken.Header.Get("X-sequence")) - assert.Equal(t, fmt.Sprint(conf.GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) + assert.Equal(t, strconv.Itoa(conf.GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) assert.Equal(t, http.MethodPut, requestForToken.Method) assert.Equal(t, "/", requestForToken.RequestURI) assert.Equal(t, testIMDSToken, requestWithToken.Header.Get("X-aws-ec2-metadata-token")) diff --git a/pkg/util/ec2/internal/helpers.go b/pkg/util/ec2/internal/helpers.go index 3874f81509d552..df7e9d54b03dad 100644 --- a/pkg/util/ec2/internal/helpers.go +++ b/pkg/util/ec2/internal/helpers.go @@ -10,6 +10,7 @@ import ( "context" "encoding/json" "fmt" + "strconv" "sync" "time" @@ -102,7 +103,7 @@ func GetToken(ctx context.Context) (string, time.Time, error) { res, err := httputils.Put(ctx, TokenURL, map[string]string{ - "X-aws-ec2-metadata-token-ttl-seconds": fmt.Sprintf("%d", int(tokenLifetime.Seconds())), + "X-aws-ec2-metadata-token-ttl-seconds": strconv.Itoa(int(tokenLifetime.Seconds())), }, nil, pkgconfigsetup.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond, pkgconfigsetup.Datadog()) diff --git a/pkg/util/ec2/internal/imds_helpers.go b/pkg/util/ec2/internal/imds_helpers.go index 5dae0dc0c68224..67b9d6a311d4e2 100644 --- a/pkg/util/ec2/internal/imds_helpers.go +++ b/pkg/util/ec2/internal/imds_helpers.go @@ -7,6 +7,7 @@ package ec2internal import ( "context" + "errors" "fmt" "time" @@ -53,7 +54,7 @@ func GetMetadataItemWithMaxLength(ctx context.Context, endpoint string, allowedI // GetMetadataItem returns the metadata item at the given endpoint func 
GetMetadataItem(ctx context.Context, endpoint string, allowedIMDSVersions Ec2IMDSVersionConfig, updateMetadataSource bool) (string, error) { if !configutils.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } return DoHTTPRequest(ctx, MetadataURL+endpoint, allowedIMDSVersions, updateMetadataSource) diff --git a/pkg/util/ec2/network.go b/pkg/util/ec2/network.go index 5b34a57c11bebf..ad92e3f2c8e976 100644 --- a/pkg/util/ec2/network.go +++ b/pkg/util/ec2/network.go @@ -61,11 +61,11 @@ var networkIDFetcher = cachedfetch.Fetcher{ switch len(vpcIDs) { case 0: - return "", fmt.Errorf("EC2: GetNetworkID no mac addresses returned") + return "", errors.New("EC2: GetNetworkID no mac addresses returned") case 1: return vpcIDs.GetAll()[0], nil default: - return "", fmt.Errorf("EC2: GetNetworkID too many mac addresses returned") + return "", errors.New("EC2: GetNetworkID too many mac addresses returned") } }, } @@ -87,7 +87,7 @@ type Subnet struct { // address (mac address) on the current host func GetSubnetForHardwareAddr(ctx context.Context, hwAddr net.HardwareAddr) (subnet Subnet, err error) { if len(hwAddr) == 0 { - err = fmt.Errorf("could not get subnet for empty hw addr") + err = errors.New("could not get subnet for empty hw addr") return } diff --git a/pkg/util/ec2/tags/cluster_name.go b/pkg/util/ec2/tags/cluster_name.go index c6231fe7d33d40..880d0e47de941b 100644 --- a/pkg/util/ec2/tags/cluster_name.go +++ b/pkg/util/ec2/tags/cluster_name.go @@ -8,7 +8,6 @@ package tags import ( "context" "errors" - "fmt" "strings" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -19,7 +18,7 @@ import ( // GetClusterName returns the name of the cluster containing the current EC2 instance func GetClusterName(ctx context.Context) (string, error) { if 
!configutils.IsCloudProviderEnabled(ec2internal.CloudProviderName, pkgconfigsetup.Datadog()) { - return "", fmt.Errorf("cloud provider is disabled by configuration") + return "", errors.New("cloud provider is disabled by configuration") } tags, err := fetchTagsFromCache(ctx) if err != nil { diff --git a/pkg/util/ec2/tags/container_instance_arn_nodocker.go b/pkg/util/ec2/tags/container_instance_arn_nodocker.go index 2a0770dc72ffa7..cad32db4d9b8b6 100644 --- a/pkg/util/ec2/tags/container_instance_arn_nodocker.go +++ b/pkg/util/ec2/tags/container_instance_arn_nodocker.go @@ -9,10 +9,10 @@ package tags import ( "context" - "fmt" + "errors" ) // getContainerInstanceARN is a stub used when the `docker` build tag is not enabled. func getContainerInstanceARN(_ context.Context) (string, error) { - return "", fmt.Errorf("ECS metadata is not available without docker build tag") + return "", errors.New("ECS metadata is not available without docker build tag") } diff --git a/pkg/util/ec2/tags/ec2_tags.go b/pkg/util/ec2/tags/ec2_tags.go index 9c9d0c138a4d1b..0c59906ab21794 100644 --- a/pkg/util/ec2/tags/ec2_tags.go +++ b/pkg/util/ec2/tags/ec2_tags.go @@ -10,6 +10,7 @@ package tags import ( "context" "encoding/json" + "errors" "fmt" "slices" "strings" @@ -52,7 +53,7 @@ func isTagExcluded(tag string) bool { // integration in Datadog backend allowing customer to collect those information without having to enable the crawler. 
func GetInstanceInfo(ctx context.Context) ([]string, error) { if !configutils.IsCloudProviderEnabled(ec2internal.CloudProviderName, pkgconfigsetup.Datadog()) { - return nil, fmt.Errorf("cloud provider is disabled by configuration") + return nil, errors.New("cloud provider is disabled by configuration") } if !pkgconfigsetup.Datadog().GetBool("collect_ec2_instance_info") { @@ -77,7 +78,7 @@ func GetInstanceInfo(ctx context.Context) ([]string, error) { if val, ok := info[infoName]; ok { tags = append(tags, fmt.Sprintf("%s:%s", tagName, val)) } else { - tags = append(tags, fmt.Sprintf("%s:unavailable", tagName)) + tags = append(tags, tagName+":unavailable") } } @@ -220,7 +221,7 @@ var fetchTags = fetchEc2Tags func fetchTagsFromCache(ctx context.Context) ([]string, error) { if !configutils.IsCloudProviderEnabled(ec2internal.CloudProviderName, pkgconfigsetup.Datadog()) { - return nil, fmt.Errorf("cloud provider is disabled by configuration") + return nil, errors.New("cloud provider is disabled by configuration") } tags, err := fetchTags(ctx) diff --git a/pkg/util/ec2/tags/ec2_tags_test.go b/pkg/util/ec2/tags/ec2_tags_test.go index 1341e6c7c38b73..176a2c2ee2583e 100644 --- a/pkg/util/ec2/tags/ec2_tags_test.go +++ b/pkg/util/ec2/tags/ec2_tags_test.go @@ -9,6 +9,7 @@ package tags import ( "context" + "errors" "fmt" "io" "net/http" @@ -124,7 +125,7 @@ func mockFetchTagsSuccess(_ context.Context) ([]string, error) { } func mockFetchTagsFailure(_ context.Context) ([]string, error) { - return nil, fmt.Errorf("could not fetch tags") + return nil, errors.New("could not fetch tags") } func TestGetTags(t *testing.T) { @@ -147,7 +148,7 @@ func TestGetTagsErrorEmptyCache(t *testing.T) { tags, err := GetTags(ctx) assert.Nil(t, tags) - assert.Equal(t, fmt.Errorf("unable to get tags from aws and cache is empty: could not fetch tags"), err) + assert.Equal(t, errors.New("unable to get tags from aws and cache is empty: could not fetch tags"), err) } func TestGetTagsErrorFullCache(t 
*testing.T) { diff --git a/pkg/util/ecs/metadata/detection.go b/pkg/util/ecs/metadata/detection.go index c3ac6b96c06ba6..f221931a6d3f68 100644 --- a/pkg/util/ecs/metadata/detection.go +++ b/pkg/util/ecs/metadata/detection.go @@ -137,7 +137,7 @@ func testURLs(urls []string, timeout time.Duration) string { func getAgentV3URLFromEnv() (string, error) { agentURL, found := os.LookupEnv(v3or4.DefaultMetadataURIv3EnvVariable) if !found { - return "", fmt.Errorf("Could not initialize client: missing metadata v3 URL") + return "", errors.New("Could not initialize client: missing metadata v3 URL") } return agentURL, nil } @@ -145,7 +145,7 @@ func getAgentV3URLFromEnv() (string, error) { func getAgentV4URLFromEnv() (string, error) { agentURL, found := os.LookupEnv(v3or4.DefaultMetadataURIv4EnvVariable) if !found { - return "", fmt.Errorf("Could not initialize client: missing metadata v4 URL") + return "", errors.New("Could not initialize client: missing metadata v4 URL") } return agentURL, nil } diff --git a/pkg/util/ecs/metadata/v3or4/client_test.go b/pkg/util/ecs/metadata/v3or4/client_test.go index f877afe0f17783..c9ced7ba74e68d 100644 --- a/pkg/util/ecs/metadata/v3or4/client_test.go +++ b/pkg/util/ecs/metadata/v3or4/client_test.go @@ -9,7 +9,7 @@ package v3or4 import ( "context" - "fmt" + "errors" "os" "testing" "time" @@ -29,7 +29,7 @@ func TestGetV4TaskWithTags(t *testing.T) { ts := dummyECS.Start() defer ts.Close() - client := NewClient(fmt.Sprintf("%s/v4/1234-1", ts.URL), "v4") + client := NewClient(ts.URL+"/v4/1234-1", "v4") task, err := client.GetTaskWithTags(context.Background()) require.NoError(t, err) @@ -45,7 +45,7 @@ func TestGetV4TaskWithTagsWithoutRetryWithDelay(t *testing.T) { require.NoError(t, err) ts := dummyECS.Start() - client := NewClient(fmt.Sprintf("%s/v4/1234-1", ts.URL), "v4") + client := NewClient(ts.URL+"/v4/1234-1", "v4") task, err := client.GetTaskWithTags(context.Background()) ts.Close() @@ -66,7 +66,7 @@ func 
TestGetV4TaskWithTagsWithRetryWithDelay(t *testing.T) { ts := dummyECS.Start() c := NewClient( - fmt.Sprintf("%s/v4/1234-1", ts.URL), + ts.URL+"/v4/1234-1", "v4", WithTryOption(100*time.Millisecond, 2*time.Second, func(d time.Duration) time.Duration { return 2 * d }), ) @@ -155,7 +155,7 @@ func TestGetContainerStats(t *testing.T) { name: "missing-container", fixture: "./testdata/task_stats.json", containerID: "470f831ceac0479b8c6614a7232e707fb24760c350b13ee589dd1d6424315d42", - expectedErr: fmt.Errorf("Failed to retrieve container stats for id: 470f831ceac0479b8c6614a7232e707fb24760c350b13ee589dd1d6424315d42"), + expectedErr: errors.New("Failed to retrieve container stats for id: 470f831ceac0479b8c6614a7232e707fb24760c350b13ee589dd1d6424315d42"), }, } @@ -168,7 +168,7 @@ func TestGetContainerStats(t *testing.T) { ts := dummyECS.Start() defer ts.Close() - client := NewClient(fmt.Sprintf("%s/v4/1234-1", ts.URL), "v4") + client := NewClient(ts.URL+"/v4/1234-1", "v4") stats, err := client.GetContainerStats(ctx, tt.containerID) if tt.expectedErr != nil { diff --git a/pkg/util/fargate/hostname_process.go b/pkg/util/fargate/hostname_process.go index da1320552d03aa..587904c10a3f07 100644 --- a/pkg/util/fargate/hostname_process.go +++ b/pkg/util/fargate/hostname_process.go @@ -10,7 +10,6 @@ package fargate import ( "context" "errors" - "fmt" ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -51,7 +50,7 @@ func getECSHost(ctx context.Context) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("fargate_task:%s", taskMeta.TaskARN), nil + return "fargate_task:" + taskMeta.TaskARN, nil } func getEKSHost(context.Context) (string, error) { @@ -70,5 +69,5 @@ func getECSManagedInstancesHost(ctx context.Context) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("sidecar_host:%s", taskMeta.TaskARN), nil + return "sidecar_host:" + taskMeta.TaskARN, nil } diff --git 
a/pkg/util/filesystem/concurrent_write.go b/pkg/util/filesystem/concurrent_write.go index ca1e05d508fa64..d29dd58592050b 100644 --- a/pkg/util/filesystem/concurrent_write.go +++ b/pkg/util/filesystem/concurrent_write.go @@ -45,7 +45,7 @@ func FetchArtifact[T any](ctx context.Context, location string, factory Artifact select { case <-ctx.Done(): - return zero, fmt.Errorf("unable to read the artifact in the given time") + return zero, errors.New("unable to read the artifact in the given time") case <-time.After(retryDelay): // try again } @@ -138,7 +138,7 @@ func FetchOrCreateArtifact[T any](ctx context.Context, location string, factory select { case <-ctx.Done(): - return zero, errors.Join(fmt.Errorf("unable to read the artifact or acquire the lock in the given time"), lockErr) + return zero, errors.Join(errors.New("unable to read the artifact or acquire the lock in the given time"), lockErr) case <-time.After(retryDelay): // try again } diff --git a/pkg/util/fxutil/createcomponent.go b/pkg/util/fxutil/createcomponent.go index fed34b2f924b69..b8b487c6801a9c 100644 --- a/pkg/util/fxutil/createcomponent.go +++ b/pkg/util/fxutil/createcomponent.go @@ -88,7 +88,7 @@ func getBundleName() string { filename = filepath.ToSlash(filename) components := strings.Split(filename, "/") if len(components) >= 3 && components[len(components)-3] == "comp" { - return fmt.Sprintf("comp/%s", components[len(components)-2]) + return "comp/" + components[len(components)-2] } panic("must be called from a bundle (comp//bundle.go)") } diff --git a/pkg/util/fxutil/logging/sender.go b/pkg/util/fxutil/logging/sender.go index b2db4b0c3bd294..2aafe8da631d5c 100644 --- a/pkg/util/fxutil/logging/sender.go +++ b/pkg/util/fxutil/logging/sender.go @@ -13,6 +13,7 @@ import ( "io" "net/http" "runtime" + "strconv" "strings" "time" ) @@ -89,6 +90,6 @@ func createRequest(agentURL string, data []byte, traceCount int) (*http.Request, req.Header.Set("Content-Type", "application/json") 
req.Header.Set("Datadog-Meta-Lang", "go") req.Header.Set("Datadog-Meta-Lang-Version", strings.TrimPrefix(runtime.Version(), "go")) - req.Header.Set("X-Datadog-Trace-Count", fmt.Sprintf("%d", traceCount)) + req.Header.Set("X-Datadog-Trace-Count", strconv.Itoa(traceCount)) return req, nil } diff --git a/pkg/util/fxutil/provide_comp_test.go b/pkg/util/fxutil/provide_comp_test.go index 3098c710d0862c..5c61cc629d09d0 100644 --- a/pkg/util/fxutil/provide_comp_test.go +++ b/pkg/util/fxutil/provide_comp_test.go @@ -8,6 +8,7 @@ package fxutil import ( "context" "encoding/json" + "errors" "fmt" "reflect" "regexp" @@ -454,7 +455,7 @@ func TestFxReturnAnError(t *testing.T) { NewAgentComponent := func(reqs requires1) (provides2, error) { return provides2{ Second: &secondImpl{First: reqs.First}, - }, fmt.Errorf("fail construction") + }, errors.New("fail construction") } // define an entry point that uses the component start := func(_ SecondComp) { diff --git a/pkg/util/grpc/auth.go b/pkg/util/grpc/auth.go index fae83d5c22e8d4..0e0011e220bd04 100644 --- a/pkg/util/grpc/auth.go +++ b/pkg/util/grpc/auth.go @@ -9,7 +9,6 @@ import ( "context" "crypto/subtle" "errors" - "fmt" grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" "google.golang.org/grpc/codes" @@ -60,7 +59,7 @@ type bearerTokenAuth struct { func (b bearerTokenAuth) GetRequestMetadata(_ context.Context, _ ...string) (map[string]string, error) { return map[string]string{ - "authorization": fmt.Sprintf("Bearer %s", b.token), + "authorization": "Bearer " + b.token, }, nil } diff --git a/pkg/util/hostinfo/hostinfo_windows.go b/pkg/util/hostinfo/hostinfo_windows.go index 3ac381a91d45e2..e0db34e0c5e7a0 100644 --- a/pkg/util/hostinfo/hostinfo_windows.go +++ b/pkg/util/hostinfo/hostinfo_windows.go @@ -19,7 +19,7 @@ package hostinfo import ( - "fmt" + "errors" "os" "runtime" "time" @@ -108,7 +108,7 @@ func Pids() ([]int32, error) { for { ps := make([]uint32, psSize) if !w32.EnumProcesses(ps, uint32(len(ps)), &read) { - 
return nil, fmt.Errorf("could not get w32.EnumProcesses") + return nil, errors.New("could not get w32.EnumProcesses") } if uint32(len(ps)) == read { // ps buffer was too small to host every results, retry with a bigger one psSize += 1024 diff --git a/pkg/util/hostname/common.go b/pkg/util/hostname/common.go index ca5eed991f5edb..b724387c23e7b2 100644 --- a/pkg/util/hostname/common.go +++ b/pkg/util/hostname/common.go @@ -8,6 +8,7 @@ package hostname import ( "context" + "errors" "fmt" "os" "strings" @@ -53,7 +54,7 @@ func fromHostnameFile(ctx context.Context, _ string) (string, error) { // Try `hostname_file` config option next hostnameFilepath := pkgconfigsetup.Datadog().GetString("hostname_file") if hostnameFilepath == "" { - return "", fmt.Errorf("'hostname_file' configuration is not enabled") + return "", errors.New("'hostname_file' configuration is not enabled") } fileContent, err := os.ReadFile(hostnameFilepath) @@ -77,7 +78,7 @@ func fromFargate(_ context.Context, _ string) (string, error) { if isSidecar() { return "", nil } - return "", fmt.Errorf("agent is not running in sidecar mode") + return "", errors.New("agent is not running in sidecar mode") } func fromGCE(ctx context.Context, _ string) (string, error) { @@ -90,7 +91,7 @@ func fromAzure(ctx context.Context, _ string) (string, error) { func fromFQDN(ctx context.Context, _ string) (string, error) { if !osHostnameUsable(ctx) { - return "", fmt.Errorf("FQDN hostname is not usable") + return "", errors.New("FQDN hostname is not usable") } if pkgconfigsetup.Datadog().GetBool("hostname_fqdn") { @@ -100,7 +101,7 @@ func fromFQDN(ctx context.Context, _ string) (string, error) { } return "", fmt.Errorf("Unable to get FQDN from system: %s", err) } - return "", fmt.Errorf("'hostname_fqdn' configuration is not enabled") + return "", errors.New("'hostname_fqdn' configuration is not enabled") } func fromOS(ctx context.Context, currentHostname string) (string, error) { @@ -108,9 +109,9 @@ func fromOS(ctx 
context.Context, currentHostname string) (string, error) { if currentHostname == "" { return osHostname() } - return "", fmt.Errorf("Skipping OS hostname as a previous provider found a valid hostname") + return "", errors.New("Skipping OS hostname as a previous provider found a valid hostname") } - return "", fmt.Errorf("OS hostname is not usable") + return "", errors.New("OS hostname is not usable") } func getValidEC2Hostname(ctx context.Context, legacyHostnameResolution bool) (string, error) { @@ -161,7 +162,7 @@ func resolveEC2Hostname(ctx context.Context, currentHostname string, legacyHostn " For more information: https://docs.datadoghq.com/ec2-use-win-prefix-detection", currentHostname, ec2Hostname) } } - return "", fmt.Errorf("not retrieving hostname from AWS: the host is not an ECS instance and other providers already retrieve non-default hostnames") + return "", errors.New("not retrieving hostname from AWS: the host is not an ECS instance and other providers already retrieve non-default hostnames") } func fromEC2(ctx context.Context, currentHostname string) (string, error) { diff --git a/pkg/util/hostname/common_test.go b/pkg/util/hostname/common_test.go index e6c03721395309..0547ec6d21063d 100644 --- a/pkg/util/hostname/common_test.go +++ b/pkg/util/hostname/common_test.go @@ -7,7 +7,7 @@ package hostname import ( "context" - "fmt" + "errors" "os" "testing" @@ -149,7 +149,7 @@ func TestFromEc2DefaultHostname(t *testing.T) { defer func() { ec2GetInstanceID = ec2.GetInstanceID }() // make AWS provider return an error - ec2GetInstanceID = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + ec2GetInstanceID = func(context.Context) (string, error) { return "", errors.New("some error") } _, err := fromEC2(context.Background(), "ip-hostname") assert.Error(t, err) @@ -169,7 +169,7 @@ func TestFromEc2Prioritize(t *testing.T) { cfg.SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) // make AWS provider return an error - 
ec2GetInstanceID = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + ec2GetInstanceID = func(context.Context) (string, error) { return "", errors.New("some error") } _, err := fromEC2(context.Background(), "non-default-hostname") assert.Error(t, err) diff --git a/pkg/util/hostname/container.go b/pkg/util/hostname/container.go index 72a1f4db0f9360..3f539e9b9a6818 100644 --- a/pkg/util/hostname/container.go +++ b/pkg/util/hostname/container.go @@ -9,7 +9,7 @@ package hostname import ( "context" - "fmt" + "errors" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/util/docker" @@ -47,7 +47,7 @@ func callContainerProvider(ctx context.Context, provider func(context.Context) ( // for testing purposes func fromContainer(ctx context.Context, _ string) (string, error) { if !configIsContainerized() { - return "", fmt.Errorf("the agent is not containerized") + return "", errors.New("the agent is not containerized") } // Cluster-agent logic: Kube apiserver @@ -70,5 +70,5 @@ func fromContainer(ctx context.Context, _ string) (string, error) { } } - return "", fmt.Errorf("no container environment detected or none of them detected a valid hostname") + return "", errors.New("no container environment detected or none of them detected a valid hostname") } diff --git a/pkg/util/hostname/container_test.go b/pkg/util/hostname/container_test.go index 87bd91a943939d..64ef6ec0dc9995 100644 --- a/pkg/util/hostname/container_test.go +++ b/pkg/util/hostname/container_test.go @@ -9,7 +9,7 @@ package hostname import ( "context" - "fmt" + "errors" "testing" "github.com/DataDog/datadog-agent/pkg/config/env" @@ -55,13 +55,13 @@ func TestFromContainer(t *testing.T) { assert.Equal(t, "kubernetes-hostname", hostname) // kubelet - kubernetesGetKubeAPIServerHostname = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + kubernetesGetKubeAPIServerHostname = func(context.Context) (string, error) { return "", 
errors.New("some error") } hostname, err = fromContainer(ctx, "") require.NoError(t, err) assert.Equal(t, "kubelet-hostname", hostname) - kubeletGetHostname = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + kubeletGetHostname = func(context.Context) (string, error) { return "", errors.New("some error") } _, err = fromContainer(ctx, "") assert.Error(t, err) @@ -72,7 +72,7 @@ func TestFromContainer(t *testing.T) { require.NoError(t, err) assert.Equal(t, "docker-hostname", hostname) - dockerGetHostname = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + dockerGetHostname = func(context.Context) (string, error) { return "", errors.New("some error") } _, err = fromContainer(ctx, "") require.Error(t, err) } diff --git a/pkg/util/hostname/providers.go b/pkg/util/hostname/providers.go index f457cda1a0b5f8..118defc45b3be7 100644 --- a/pkg/util/hostname/providers.go +++ b/pkg/util/hostname/providers.go @@ -9,8 +9,8 @@ package hostname import ( "context" + "errors" "expvar" - "fmt" "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -238,7 +238,7 @@ func getHostname(ctx context.Context, keyCache string, legacyHostnameResolution return hostnameData, nil } - err = fmt.Errorf("unable to reliably determine the host name. You can define one in the agent config file or in your hosts file") + err = errors.New("unable to reliably determine the host name. 
You can define one in the agent config file or in your hosts file") expErr := new(expvar.String) expErr.Set(err.Error()) hostnameErrors.Set("all", expErr) diff --git a/pkg/util/hostname/providers_test.go b/pkg/util/hostname/providers_test.go index aa1cd6370267d8..3615068aabfccf 100644 --- a/pkg/util/hostname/providers_test.go +++ b/pkg/util/hostname/providers_test.go @@ -9,7 +9,7 @@ package hostname import ( "context" - "fmt" + "errors" "os" "testing" @@ -76,13 +76,13 @@ func setupHostnameTest(t *testing.T, tc testCase) { if tc.GCE { gceGetHostname = func(context.Context) (string, error) { return "hostname-from-gce", nil } } else { - gceGetHostname = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + gceGetHostname = func(context.Context) (string, error) { return "", errors.New("some error") } } if tc.azure { azureGetHostname = func(context.Context) (string, error) { return "hostname-from-azure", nil } } else { - azureGetHostname = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + azureGetHostname = func(context.Context) (string, error) { return "", errors.New("some error") } } if tc.FQDN || tc.FQDNEC2 { @@ -95,7 +95,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { fqdnHostname = func() (string, error) { return "ip-default-ec2-hostname", nil } } } else { - fqdnHostname = func() (string, error) { return "", fmt.Errorf("some error") } + fqdnHostname = func() (string, error) { return "", errors.New("some error") } } if tc.OS || tc.OSEC2 { @@ -107,13 +107,13 @@ func setupHostnameTest(t *testing.T, tc testCase) { osHostname = func() (string, error) { return "ip-default-ec2-hostname", nil } } } else { - osHostname = func() (string, error) { return "", fmt.Errorf("some error") } + osHostname = func() (string, error) { return "", errors.New("some error") } } if tc.EC2 { ec2GetInstanceID = func(context.Context) (string, error) { return "hostname-from-ec2", nil } } else { - ec2GetInstanceID = 
func(context.Context) (string, error) { return "", fmt.Errorf("some error") } + ec2GetInstanceID = func(context.Context) (string, error) { return "", errors.New("some error") } } if tc.EC2Proritized { diff --git a/pkg/util/hostname/validate/normalize.go b/pkg/util/hostname/validate/normalize.go index 5ed6240ea76596..1428eda745640a 100644 --- a/pkg/util/hostname/validate/normalize.go +++ b/pkg/util/hostname/validate/normalize.go @@ -7,7 +7,7 @@ package validate import ( "bytes" - "fmt" + "errors" "regexp" ) @@ -27,14 +27,14 @@ func NormalizeHost(host string) (string, error) { // hosts longer than 253 characters are illegal if len(host) > 253 { - return "", fmt.Errorf("hostname is too long, should contain less than 253 characters") + return "", errors.New("hostname is too long, should contain less than 253 characters") } for _, r := range host { switch r { // has null rune just toss the whole thing case '\x00': - return "", fmt.Errorf("hostname cannot contain null character") + return "", errors.New("hostname cannot contain null character") // drop these characters entirely case '\n', '\r', '\t': continue diff --git a/pkg/util/hostname/validate/validate.go b/pkg/util/hostname/validate/validate.go index 10e3c7af6949fa..7e85a59d4771e5 100644 --- a/pkg/util/hostname/validate/validate.go +++ b/pkg/util/hostname/validate/validate.go @@ -7,6 +7,7 @@ package validate import ( + "errors" "fmt" "regexp" "slices" @@ -31,7 +32,7 @@ var ( // In case it's not, the returned error contains the details of the failure. 
func ValidHostname(hostname string) error { if hostname == "" { - return fmt.Errorf("hostname is empty") + return errors.New("hostname is empty") } else if isLocal(hostname) { return fmt.Errorf("%s is a local hostname", hostname) } else if len(hostname) > maxLength { diff --git a/pkg/util/http/transport_test.go b/pkg/util/http/transport_test.go index 58e09e19f5b12a..b8fea38ad89aac 100644 --- a/pkg/util/http/transport_test.go +++ b/pkg/util/http/transport_test.go @@ -7,7 +7,6 @@ package http import ( "crypto/tls" - "fmt" "net/http" "testing" "time" @@ -261,7 +260,7 @@ func TestMinTLSVersionFromConfig(t *testing.T) { for _, test := range tests { t.Run( - fmt.Sprintf("min_tls_version=%s", test.minTLSVersion), + "min_tls_version="+test.minTLSVersion, func(t *testing.T) { cfg := configmock.New(t) if test.minTLSVersion != "" { diff --git a/pkg/util/installinfo/install_info.go b/pkg/util/installinfo/install_info.go index a67c075f56f126..43142742bccdc9 100644 --- a/pkg/util/installinfo/install_info.go +++ b/pkg/util/installinfo/install_info.go @@ -12,7 +12,7 @@ package installinfo import ( "encoding/json" - "fmt" + "errors" "net/http" "os" "path/filepath" @@ -92,11 +92,11 @@ func Get(conf model.Reader) (*InstallInfo, error) { // setRuntimeInstallInfo sets the install info at runtime, overriding file and env var values func setRuntimeInstallInfo(info *InstallInfo) error { if info == nil { - return fmt.Errorf("install info cannot be nil") + return errors.New("install info cannot be nil") } if info.Tool == "" || info.ToolVersion == "" || info.InstallerVersion == "" { - return fmt.Errorf("install info must have tool, tool_version, and installer_version set") + return errors.New("install info must have tool, tool_version, and installer_version set") } runtimeInfoMutex.Lock() diff --git a/pkg/util/installinfo/install_info_test.go b/pkg/util/installinfo/install_info_test.go index 0b62882b4e4334..ac7a3cd2fc9ac3 100644 --- a/pkg/util/installinfo/install_info_test.go +++ 
b/pkg/util/installinfo/install_info_test.go @@ -7,8 +7,8 @@ package installinfo import ( "encoding/json" - "fmt" "os" + "strconv" "strings" "testing" "time" @@ -131,7 +131,7 @@ func Test_logVersionHistoryToFile_maxVersionHistoryEntries(t *testing.T) { expected := make([]versionHistoryEntry, maxVersionHistoryEntries) for i := 0; i < maxVersionHistoryEntries; i++ { entries[i] = versionHistoryEntry{ - Version: fmt.Sprintf("%d", i), + Version: strconv.Itoa(i), Timestamp: now.Add(time.Duration(i) * time.Second), InstallMethod: InstallInfo{ Tool: "tool", @@ -140,7 +140,7 @@ func Test_logVersionHistoryToFile_maxVersionHistoryEntries(t *testing.T) { }, } expected[i] = versionHistoryEntry{ - Version: fmt.Sprintf("%d", i+10), + Version: strconv.Itoa(i + 10), Timestamp: now.Add(time.Duration(i+10) * time.Second), InstallMethod: InstallInfo{ Tool: "tool", @@ -169,7 +169,7 @@ install_method: logVersionHistoryToFile( actual.Name(), installInfoFile.Name(), - fmt.Sprintf("%d", i), + strconv.Itoa(i), now.Add(time.Duration(i)*time.Second), ) } diff --git a/pkg/util/intern/string_test.go b/pkg/util/intern/string_test.go index beb77b7175642c..f3d3fca1e7c1d0 100644 --- a/pkg/util/intern/string_test.go +++ b/pkg/util/intern/string_test.go @@ -35,7 +35,6 @@ package intern import ( - "fmt" "runtime" "testing" ) @@ -104,7 +103,7 @@ func (s *StringInterner) mapKeys() (keys []string) { s.mu.Lock() defer s.mu.Unlock() for k := range s.valMap { - keys = append(keys, fmt.Sprint(k)) + keys = append(keys, k) } return keys } diff --git a/pkg/util/kernel/netns/netns.go b/pkg/util/kernel/netns/netns.go index b28f7d92006e9b..1132d97124bd74 100644 --- a/pkg/util/kernel/netns/netns.go +++ b/pkg/util/kernel/netns/netns.go @@ -142,7 +142,7 @@ func GetNetNsInoFromPid(procRoot string, pid int) (uint32, error) { // GetInoForNs gets the inode number for the given network namespace func GetInoForNs(ns netns.NsHandle) (uint32, error) { if ns.Equal(netns.None()) { - return 0, fmt.Errorf("net ns is none") + 
return 0, errors.New("net ns is none") } var s syscall.Stat_t diff --git a/pkg/util/kernel/netns/netns_darwin.go b/pkg/util/kernel/netns/netns_darwin.go index dbe5b4d7157dbb..8e0eaf6d5d6662 100644 --- a/pkg/util/kernel/netns/netns_darwin.go +++ b/pkg/util/kernel/netns/netns_darwin.go @@ -7,7 +7,7 @@ package netns import ( - "fmt" + "errors" "github.com/vishvananda/netns" ) @@ -15,10 +15,10 @@ import ( // GetNetNsInoFromPid gets the network namespace inode number for the given // `pid` func GetNetNsInoFromPid(_ string, _ int) (uint32, error) { - return 0, fmt.Errorf("not supported") + return 0, errors.New("not supported") } // GetInoForNs gets the inode number for the given network namespace func GetInoForNs(_ netns.NsHandle) (uint32, error) { - return 0, fmt.Errorf("not supported") + return 0, errors.New("not supported") } diff --git a/pkg/util/kernel/netns/netns_windows.go b/pkg/util/kernel/netns/netns_windows.go index 3347dd10b7e538..26420a8c08366c 100644 --- a/pkg/util/kernel/netns/netns_windows.go +++ b/pkg/util/kernel/netns/netns_windows.go @@ -7,7 +7,7 @@ package netns import ( - "fmt" + "errors" "github.com/vishvananda/netns" ) @@ -15,10 +15,10 @@ import ( // GetNetNsInoFromPid gets the network namespace inode number for the given // `pid` func GetNetNsInoFromPid(_ string, _ int) (uint32, error) { - return 0, fmt.Errorf("not supported") + return 0, errors.New("not supported") } // GetInoForNs gets the inode number for the given network namespace func GetInoForNs(_ netns.NsHandle) (uint32, error) { - return 0, fmt.Errorf("not supported") + return 0, errors.New("not supported") } diff --git a/pkg/util/kubelet/hostname_stub.go b/pkg/util/kubelet/hostname_stub.go index 69a59eff185d14..8e99f108b6f1ba 100644 --- a/pkg/util/kubelet/hostname_stub.go +++ b/pkg/util/kubelet/hostname_stub.go @@ -9,10 +9,10 @@ package kubelet import ( "context" - "fmt" + "errors" ) // GetHostname builds a hostname from the kubernetes nodename and an optional cluster-name func 
GetHostname(_ context.Context) (string, error) { - return "", fmt.Errorf("kubelet hostname provider is not enabled") + return "", errors.New("kubelet hostname provider is not enabled") } diff --git a/pkg/util/kubelet/no_host_alias.go b/pkg/util/kubelet/no_host_alias.go index 3d117e2134a057..6f7573b7472809 100644 --- a/pkg/util/kubelet/no_host_alias.go +++ b/pkg/util/kubelet/no_host_alias.go @@ -9,12 +9,12 @@ package kubelet import ( "context" - "fmt" + "errors" ) // GetHostAliases uses the "kubelet" hostname provider to fetch the kubernetes alias func GetHostAliases(_ context.Context) ([]string, error) { - return nil, fmt.Errorf("Kubernetes support not build: couldn't extract a host alias from the kubelet") + return nil, errors.New("Kubernetes support not build: couldn't extract a host alias from the kubelet") } // GetMetaClusterNameText returns the clusterName text for the agent status output diff --git a/pkg/util/kubernetes/apiserver/apiserver.go b/pkg/util/kubernetes/apiserver/apiserver.go index f4ad8dad56fce1..e82be37c7740c2 100644 --- a/pkg/util/kubernetes/apiserver/apiserver.go +++ b/pkg/util/kubernetes/apiserver/apiserver.go @@ -194,13 +194,13 @@ func WaitForAPIClient(ctx context.Context) (*APIClient, error) { case retry.OK: return globalAPIClient, nil case retry.PermaFail: - return nil, fmt.Errorf("Permanent failure while waiting for Kubernetes APIServer") + return nil, errors.New("Permanent failure while waiting for Kubernetes APIServer") default: sleepFor := globalAPIClient.initRetry.NextRetry().UTC().Sub(time.Now().UTC()) + time.Second log.Debugf("Waiting for APIServer, next retry: %v", sleepFor) select { case <-ctx.Done(): - return nil, fmt.Errorf("Context deadline reached while waiting for Kubernetes APIServer") + return nil, errors.New("Context deadline reached while waiting for Kubernetes APIServer") case <-time.After(sleepFor): } } @@ -414,7 +414,7 @@ func (c *APIClient) connect() error { // Try to get apiserver version to confim connectivity 
APIversion := c.Cl.Discovery().RESTClient().APIVersion() if APIversion.Empty() { - return fmt.Errorf("cannot retrieve the version of the API server at the moment") + return errors.New("cannot retrieve the version of the API server at the moment") } log.Debugf("Connected to kubernetes apiserver, version %s", APIversion.Version) @@ -558,7 +558,7 @@ func GetMetadataMapBundleOnAllNodes(cl *APIClient) (*apiv1.MetadataResponse, err nodes, err := getNodeList(cl) if err != nil { - stats.Errors = fmt.Sprintf("Failed to get nodes from the API server: %s", err.Error()) + stats.Errors = "Failed to get nodes from the API server: " + err.Error() return stats, err } @@ -606,7 +606,7 @@ func GetPodMetadataNames(nodeName, ns, podName string) ([]string, error) { log.Tracef("found %d services for the pod %s on the node %s", len(serviceList), podName, nodeName) var metaList []string for _, s := range serviceList { - metaList = append(metaList, fmt.Sprintf("kube_service:%s", s)) + metaList = append(metaList, "kube_service:"+s) } return metaList, nil } @@ -677,7 +677,7 @@ func (c *APIClient) GetARandomNodeName(ctx context.Context) (string, error) { } if len(nodeList.Items) == 0 { - return "", fmt.Errorf("No node found") + return "", errors.New("No node found") } return nodeList.Items[0].Name, nil diff --git a/pkg/util/kubernetes/apiserver/common/common_linux.go b/pkg/util/kubernetes/apiserver/common/common_linux.go index 72c8cd64a6f96b..35f2d7caa3c4c7 100644 --- a/pkg/util/kubernetes/apiserver/common/common_linux.go +++ b/pkg/util/kubernetes/apiserver/common/common_linux.go @@ -8,7 +8,7 @@ package common import ( - "fmt" + "errors" "os" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -35,7 +35,7 @@ func GetSelfPodName() (string, error) { } if *hostUTS { - return "", fmt.Errorf("DD_POD_NAME is not set and running in host UTS namespace; cannot reliably determine self pod name") + return "", errors.New("DD_POD_NAME is not set and running in host UTS namespace; cannot reliably determine 
self pod name") } return os.Hostname() diff --git a/pkg/util/kubernetes/apiserver/controllers/controllers.go b/pkg/util/kubernetes/apiserver/controllers/controllers.go index d90f81e5a503a6..20912a1bb2c6ce 100644 --- a/pkg/util/kubernetes/apiserver/controllers/controllers.go +++ b/pkg/util/kubernetes/apiserver/controllers/controllers.go @@ -11,7 +11,6 @@ package controllers import ( "errors" - "fmt" "sync" k8serrors "k8s.io/apimachinery/pkg/util/errors" @@ -144,7 +143,7 @@ func startAutoscalersController(ctx *ControllerContext, c chan error) { var err error dc, ok := ctx.DatadogClient.Get() if !ok { - c <- fmt.Errorf("datadog client is not initialized") + c <- errors.New("datadog client is not initialized") return } autoscalersController, err := newAutoscalersController( diff --git a/pkg/util/kubernetes/apiserver/controllers/wpa_controller_test.go b/pkg/util/kubernetes/apiserver/controllers/wpa_controller_test.go index 826678ca3d4805..e8b49dab6701bf 100644 --- a/pkg/util/kubernetes/apiserver/controllers/wpa_controller_test.go +++ b/pkg/util/kubernetes/apiserver/controllers/wpa_controller_test.go @@ -9,6 +9,7 @@ package controllers import ( "context" + "errors" "fmt" "reflect" "strings" @@ -39,7 +40,7 @@ import ( datadogclientmock "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - "github.com/DataDog/datadog-agent/pkg/errors" + pkgerrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -444,7 +445,7 @@ func TestWPASync(t *testing.T) { fakeKey := "default/prometheus" err = hctrl.syncWPA(fakeKey) - require.Error(t, err, errors.IsNotFound) + require.Error(t, err, pkgerrors.IsNotFound) } // TestWPAGC tests the GC process of of the 
controller @@ -555,7 +556,7 @@ func TestUnstructuredIntoWPA(t *testing.T) { caseName: "obj corrupted", obj: map[string]interface{}{}, expectedWpa: nil, - error: fmt.Errorf("could not cast Unstructured object: map[]"), + error: errors.New("could not cast Unstructured object: map[]"), }, { caseName: "All good", @@ -601,7 +602,7 @@ func TestWPACRDCheck(t *testing.T) { Group: "datadoghq.com", Resource: "watermarkpodautoscalers", }, "") - nonRetryableError := fmt.Errorf("unexpectedError") + nonRetryableError := errors.New("unexpectedError") testCases := []struct { caseName string checkError error diff --git a/pkg/util/kubernetes/apiserver/events.go b/pkg/util/kubernetes/apiserver/events.go index c1b5aae603c405..f14f94355af0e3 100644 --- a/pkg/util/kubernetes/apiserver/events.go +++ b/pkg/util/kubernetes/apiserver/events.go @@ -11,6 +11,7 @@ package apiserver import ( "context" + "errors" "fmt" "strconv" "time" @@ -60,12 +61,12 @@ func (c *APIClient) RunEventCollection(resVer string, lastListTime time.Time, ev select { case rcv, ok := <-evWatcher.ResultChan(): if !ok { - return added, resVer, lastListTime, fmt.Errorf("Unexpected watch close") + return added, resVer, lastListTime, errors.New("Unexpected watch close") } if rcv.Type == watch.Error { status, ok := rcv.Object.(*metav1.Status) if !ok { - return added, resVer, lastListTime, fmt.Errorf("Could not unmarshall the status of the event") + return added, resVer, lastListTime, errors.New("Could not unmarshall the status of the event") } switch status.Reason { // Using a switch as there are a lot of different types and we might want to explore adapting the behaviour for certain ones in the future. 
diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go index 2c55b5042b8ce5..28d9231e74254d 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go @@ -12,6 +12,7 @@ package leaderelection import ( "context" "encoding/json" + "errors" "fmt" "sync" "time" @@ -104,7 +105,7 @@ func (le *LeaderEngine) initialize() *retry.Error { // GetLeaderEngine returns an initialized leader engine. func GetLeaderEngine() (*LeaderEngine, error) { if globalLeaderEngine == nil { - return nil, fmt.Errorf("Global Leader Engine was not created") + return nil, errors.New("Global Leader Engine was not created") } err := globalLeaderEngine.initialize() if err != nil { diff --git a/pkg/util/kubernetes/apiserver/resourcetypes.go b/pkg/util/kubernetes/apiserver/resourcetypes.go index cef7493bf3498f..76e31dd8205b68 100644 --- a/pkg/util/kubernetes/apiserver/resourcetypes.go +++ b/pkg/util/kubernetes/apiserver/resourcetypes.go @@ -9,6 +9,7 @@ package apiserver import ( "context" + "errors" "fmt" "strings" "sync" @@ -83,7 +84,7 @@ func InitializeGlobalResourceTypeCache(discoveryClient discovery.DiscoveryInterf // GetResourceType retrieves the resource type for the given kind and group. func GetResourceType(kind, group string) (string, error) { if resourceCache == nil { - return "", fmt.Errorf("resource type cache is not initialized") + return "", errors.New("resource type cache is not initialized") } return resourceCache.getResourceType(kind, group) } @@ -91,7 +92,7 @@ func GetResourceType(kind, group string) (string, error) { // GetResourceKind retrieves the kind given the resource plural name and group. 
func GetResourceKind(resource, apiGroup string) (string, error) { if resourceCache == nil { - return "", fmt.Errorf("resource type cache is not initialized") + return "", errors.New("resource type cache is not initialized") } return resourceCache.getResourceKind(resource, apiGroup) diff --git a/pkg/util/kubernetes/autoscalers/autoscalers_test.go b/pkg/util/kubernetes/autoscalers/autoscalers_test.go index b2d6d4b19bbfa0..07e7aa76f3fda3 100644 --- a/pkg/util/kubernetes/autoscalers/autoscalers_test.go +++ b/pkg/util/kubernetes/autoscalers/autoscalers_test.go @@ -8,7 +8,7 @@ package autoscalers import ( - "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -481,7 +481,7 @@ func TestDiffExternalMetrics(t *testing.T) { []metav1.Object{ &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(5)), + UID: types.UID(strconv.Itoa(5)), Namespace: "nsbar", Name: "foo", }, @@ -489,7 +489,7 @@ func TestDiffExternalMetrics(t *testing.T) { }, &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(7)), + UID: types.UID(strconv.Itoa(7)), Namespace: "zanzi", Name: "bar", }, @@ -499,7 +499,7 @@ func TestDiffExternalMetrics(t *testing.T) { []*v1alpha1.WatermarkPodAutoscaler{ { ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(9)), + UID: types.UID(strconv.Itoa(9)), Namespace: "nsbar", Name: "foo", }, @@ -507,7 +507,7 @@ func TestDiffExternalMetrics(t *testing.T) { }, { ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(11)), + UID: types.UID(strconv.Itoa(11)), Namespace: "zanzi", Name: "bar", }, @@ -521,7 +521,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: true, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(5), + UID: strconv.Itoa(5), Name: "foo", Namespace: "nsbar", }, @@ -532,7 +532,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(6), + UID: 
strconv.Itoa(6), Name: "foo", Namespace: "baz", }, @@ -543,7 +543,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Name: "bar", Namespace: "zanzi", }, @@ -554,7 +554,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "watermark", - UID: fmt.Sprint(9), + UID: strconv.Itoa(9), Name: "bar", Namespace: "zanzi", }, @@ -567,7 +567,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(6), + UID: strconv.Itoa(6), Name: "foo", Namespace: "baz", }, @@ -578,7 +578,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "watermark", - UID: fmt.Sprint(9), + UID: strconv.Itoa(9), Name: "bar", Namespace: "zanzi", }, @@ -589,7 +589,7 @@ func TestDiffExternalMetrics(t *testing.T) { []metav1.Object{ &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(5)), + UID: types.UID(strconv.Itoa(5)), Namespace: "bar", Name: "foo", }, @@ -597,7 +597,7 @@ func TestDiffExternalMetrics(t *testing.T) { }, &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(7)), + UID: types.UID(strconv.Itoa(7)), Namespace: "baz", Name: "foo", }, @@ -612,7 +612,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: true, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(5), + UID: strconv.Itoa(5), Namespace: "bar", Name: "foo", }, @@ -623,7 +623,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Namespace: "baz", Name: "foo", }, @@ -636,7 +636,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + 
UID: strconv.Itoa(7), Name: "foo", Namespace: "baz", }, @@ -648,7 +648,7 @@ func TestDiffExternalMetrics(t *testing.T) { []*v1alpha1.WatermarkPodAutoscaler{ { ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(7)), + UID: types.UID(strconv.Itoa(7)), Namespace: "zanzi", Name: "bar", }, @@ -661,7 +661,7 @@ func TestDiffExternalMetrics(t *testing.T) { Labels: map[string]string{"foo": "tu"}, Valid: true, Ref: custommetrics.ObjectReference{ - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Name: "bar", Namespace: "zanzi", }, @@ -673,7 +673,7 @@ func TestDiffExternalMetrics(t *testing.T) { Labels: map[string]string{"foo": "tu"}, Valid: true, Ref: custommetrics.ObjectReference{ - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Name: "bar", Namespace: "zanzi", }, @@ -684,7 +684,7 @@ func TestDiffExternalMetrics(t *testing.T) { []metav1.Object{ &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(5)), + UID: types.UID(strconv.Itoa(5)), Namespace: "bar", Name: "foo", }, @@ -692,7 +692,7 @@ func TestDiffExternalMetrics(t *testing.T) { }, &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(7)), + UID: types.UID(strconv.Itoa(7)), Namespace: "baz", Name: "foo", }, @@ -707,7 +707,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: true, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(5), + UID: strconv.Itoa(5), Namespace: "bar", Name: "foo", }, @@ -718,7 +718,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Namespace: "baz", Name: "foo", }, @@ -731,7 +731,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Name: "foo", Namespace: "baz", }, @@ -742,7 +742,7 @@ func TestDiffExternalMetrics(t *testing.T) { []metav1.Object{ 
&autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(5)), + UID: types.UID(strconv.Itoa(5)), Namespace: "bar", Name: "foo", }, @@ -750,7 +750,7 @@ func TestDiffExternalMetrics(t *testing.T) { }, &autoscalingv2.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ - UID: types.UID(fmt.Sprint(7)), + UID: types.UID(strconv.Itoa(7)), Namespace: "baz", Name: "foo", }, @@ -765,7 +765,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: true, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(5), + UID: strconv.Itoa(5), Namespace: "bar", Name: "foo", }, @@ -776,7 +776,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Namespace: "baz", Name: "foo", }, @@ -789,7 +789,7 @@ func TestDiffExternalMetrics(t *testing.T) { Valid: false, Ref: custommetrics.ObjectReference{ Type: "horizontal", - UID: fmt.Sprint(7), + UID: strconv.Itoa(7), Name: "foo", Namespace: "baz", }, diff --git a/pkg/util/kubernetes/autoscalers/datadogexternal_test.go b/pkg/util/kubernetes/autoscalers/datadogexternal_test.go index db399bcd5c93f7..0d58f6e710807e 100644 --- a/pkg/util/kubernetes/autoscalers/datadogexternal_test.go +++ b/pkg/util/kubernetes/autoscalers/datadogexternal_test.go @@ -8,7 +8,7 @@ package autoscalers import ( - "fmt" + "errors" "testing" "time" @@ -42,11 +42,11 @@ func TestDatadogExternalQuery(t *testing.T) { { "metricName yields rate limiting error response from Datadog", func(int64, int64, string) ([]datadog.Series, error) { - return nil, fmt.Errorf("Rate limit of 300 requests in 3600 seconds") + return nil, errors.New("Rate limit of 300 requests in 3600 seconds") }, []string{"avg:mymetric{foo:bar}.rollup(30)"}, nil, - fmt.Errorf("Rate limit of 300 requests in 3600 seconds"), + errors.New("Rate limit of 300 requests in 3600 seconds"), }, { "metrics with different granularities Datadog", diff --git 
a/pkg/util/kubernetes/autoscalers/processor.go b/pkg/util/kubernetes/autoscalers/processor.go index 2cfe2452ea1e67..3e9e7c3fe09593 100644 --- a/pkg/util/kubernetes/autoscalers/processor.go +++ b/pkg/util/kubernetes/autoscalers/processor.go @@ -8,6 +8,7 @@ package autoscalers import ( + "errors" "fmt" "maps" "math" @@ -229,7 +230,7 @@ func isURLBeyondLimits(uriLength, numBuckets int) (bool, error) { // Autoscalers with enough labels to form single a query of more than 7k characters are not supported. lengthOverspill := uriLength >= maxCharactersPerChunk if lengthOverspill && numBuckets == 0 { - return true, fmt.Errorf("Query is too long, could yield a server side error. Dropping") + return true, errors.New("Query is too long, could yield a server side error. Dropping") } chunkSize := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.chunk_size") @@ -268,7 +269,7 @@ func getKey(name string, labels map[string]string, aggregator string, rollup int var result string if len(labels) == 0 { - result = fmt.Sprintf("%s{*}", name) + result = name + "{*}" } else { datadogTags := []string{} for key, val := range labels { diff --git a/pkg/util/kubernetes/autoscalers/processor_test.go b/pkg/util/kubernetes/autoscalers/processor_test.go index 22f8c41d91db07..b174e01d7602b5 100644 --- a/pkg/util/kubernetes/autoscalers/processor_test.go +++ b/pkg/util/kubernetes/autoscalers/processor_test.go @@ -8,6 +8,7 @@ package autoscalers import ( + "errors" "fmt" "reflect" "strings" @@ -218,7 +219,7 @@ func TestProcessor_UpdateExternalMetrics(t *testing.T) { } datadogClientComp := datadogclientmock.New(t).Comp datadogClientComp.SetQueryMetricsFunc(func(int64, int64, string) ([]datadog.Series, error) { - return nil, fmt.Errorf("API error 400 Bad Request: {\"error\": [\"Rate limit of 300 requests in 3600 seconds reqchec.\"]}") + return nil, errors.New("API error 400 Bad Request: {\"error\": [\"Rate limit of 300 requests in 3600 seconds reqchec.\"]}") }) hpaCl := 
&Processor{datadogClient: datadogClientComp, externalMaxAge: testMaxAge, parallelQueries: testParallelQueries} @@ -364,7 +365,7 @@ func TestValidateExternalMetricsBatching(t *testing.T) { // Error will be under the format: // Error: Error while executing metric query avg:foo-56{foo:bar}.rollup(30),avg:foo-93{foo:bar}.rollup(30),[...],avg:foo-64{foo:bar}.rollup(30),avg:foo-81{foo:bar}.rollup(30): Networking Error, timeout!!! // In the logs, we will be able to see which bundle failed, but for the tests, we can't know which routine will finish first (and therefore have `bc == 1`), so we only check the error returned by the Datadog Servers. - return nil, fmt.Errorf("networking Error, timeout") + return nil, errors.New("networking Error, timeout") } return tt.out, nil }) @@ -591,7 +592,7 @@ func TestUpdateRateLimiting(t *testing.T) { Period: 3600, Reset: 11, }, - error: fmt.Errorf("strconv.Atoi: parsing \"\": invalid syntax"), + error: errors.New("strconv.Atoi: parsing \"\": invalid syntax"), }, { desc: "Missing headers case", @@ -606,7 +607,7 @@ func TestUpdateRateLimiting(t *testing.T) { Period: 3600, }, // Although several headers are missing, the Aggregate will only return 1 error as they are the same - error: fmt.Errorf("strconv.Atoi: parsing \"\": invalid syntax"), + error: errors.New("strconv.Atoi: parsing \"\": invalid syntax"), }, } diff --git a/pkg/util/kubernetes/helpers_test.go b/pkg/util/kubernetes/helpers_test.go index 899ba603a2c0e1..97aa2689e6917a 100644 --- a/pkg/util/kubernetes/helpers_test.go +++ b/pkg/util/kubernetes/helpers_test.go @@ -6,7 +6,6 @@ package kubernetes import ( - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -34,7 +33,7 @@ func TestParseDeploymentForReplicaSet(t *testing.T) { "frontend-5f": "", // too short "frontend-56a89cfff7": "", // no vowels allowed } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { assert.Equal(t, out, ParseDeploymentForReplicaSet(in)) }) } @@ -62,7 
+61,7 @@ func TestParseDeploymentForPodName(t *testing.T) { "frontend-56c89cff-bx": "", // too short "frontend-56a89cfff7-a": "", // no vowels allowed } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { assert.Equal(t, out, ParseDeploymentForPodName(in)) }) } @@ -90,7 +89,7 @@ func TestParseReplicaSetForPodName(t *testing.T) { "frontend-56c89cff-bx": "", // too short "frontend-56a89cfff7-a": "", // no vowels allowed } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { assert.Equal(t, out, ParseReplicaSetForPodName(in)) }) } @@ -111,7 +110,7 @@ func TestParseCronJobForJob(t *testing.T) { "hello-60": {"", 0}, "hello-1562319a60": {"", 0}, } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { cronjobName, id := ParseCronJobForJob(in) assert.Equal(t, out, struct { string diff --git a/pkg/util/kubernetes/hostname_stub.go b/pkg/util/kubernetes/hostname_stub.go index 7b25535c077cfd..e09a575814ffab 100644 --- a/pkg/util/kubernetes/hostname_stub.go +++ b/pkg/util/kubernetes/hostname_stub.go @@ -9,10 +9,10 @@ package kubernetes import ( "context" - "fmt" + "errors" ) // GetKubeAPIServerHostname returns the hostname from kubeapiserver func GetKubeAPIServerHostname(context.Context) (string, error) { - return "", fmt.Errorf("kubeapiserver hostname provider is not enabled") + return "", errors.New("kubeapiserver hostname provider is not enabled") } diff --git a/pkg/util/kubernetes/kubelet/kubelet.go b/pkg/util/kubernetes/kubelet/kubelet.go index 4f8524ffa2a1e4..a365feae274edb 100644 --- a/pkg/util/kubernetes/kubelet/kubelet.go +++ b/pkg/util/kubernetes/kubelet/kubelet.go @@ -13,6 +13,7 @@ import ( "fmt" "io" "net/http" + "strconv" "sync" "time" @@ -86,7 +87,7 @@ func (ku *KubeUtil) init() error { ku.rawConnectionInfo["url"] = ku.kubeletClient.kubeletURL if ku.kubeletClient.config.scheme == "https" { - 
ku.rawConnectionInfo["verify_tls"] = fmt.Sprintf("%v", ku.kubeletClient.config.tlsVerify) + ku.rawConnectionInfo["verify_tls"] = strconv.FormatBool(ku.kubeletClient.config.tlsVerify) if ku.kubeletClient.config.caPath != "" { ku.rawConnectionInfo["ca_cert"] = ku.kubeletClient.config.caPath } @@ -174,7 +175,7 @@ func GetKubeUtil() (KubeUtilInterface, error) { func (ku *KubeUtil) StreamLogs(ctx context.Context, podNamespace, podName, containerName string, logOptions *StreamLogOptions) (io.ReadCloser, error) { query := fmt.Sprintf("follow=%t&timestamps=%t", logOptions.Follow, logOptions.Timestamps) if logOptions.SinceTime != nil { - query += fmt.Sprintf("&sinceTime=%s", logOptions.SinceTime.Format(time.RFC3339)) + query += "&sinceTime=" + logOptions.SinceTime.Format(time.RFC3339) } path := fmt.Sprintf("/containerLogs/%s/%s/%s?%s", podNamespace, podName, containerName, query) return ku.kubeletClient.queryWithResp(ctx, path) diff --git a/pkg/util/kubernetes/kubelet/kubelet_client.go b/pkg/util/kubernetes/kubelet/kubelet_client.go index d6ae316ab859aa..63785b2db00434 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_client.go +++ b/pkg/util/kubernetes/kubelet/kubelet_client.go @@ -237,7 +237,7 @@ func getKubeletClient(ctx context.Context) (*kubeletClient, error) { apiServerHost := os.Getenv("KUBERNETES_SERVICE_HOST") apiServerPort := os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS") if apiServerHost == "" || apiServerPort == "" { - return nil, fmt.Errorf("failed to determine API server host/port") + return nil, errors.New("failed to determine API server host/port") } clientConfig.useAPIServer = true @@ -303,7 +303,7 @@ func getKubeletClient(ctx context.Context) (*kubeletClient, error) { return newForConfig(clientConfig, kubeletTimeout) } - return nil, fmt.Errorf("Invalid Kubelet configuration: both HTTPS and HTTP ports are disabled") + return nil, errors.New("Invalid Kubelet configuration: both HTTPS and HTTP ports are disabled") } func checkKubeletConnection(ctx context.Context, 
scheme string, port int, prefix string, hosts *connectionInfo, clientConfig *kubeletClientConfig) error { diff --git a/pkg/util/kubernetes/kubelet/kubelet_common_test.go b/pkg/util/kubernetes/kubelet/kubelet_common_test.go index 9631f252807c72..8d5ed174a34eeb 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_common_test.go +++ b/pkg/util/kubernetes/kubelet/kubelet_common_test.go @@ -8,7 +8,6 @@ package kubelet import ( - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -49,7 +48,7 @@ func TestKubeContainerIDToTaggerEntityID(t *testing.T) { "/deadbeef": "", "runtime://foo/bar": "container_id://foo/bar", } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { res, _ := KubeContainerIDToTaggerEntityID(in) assert.Equal(t, out, res.String()) }) @@ -66,7 +65,7 @@ func TestKubePodUIDToTaggerEntityID(t *testing.T) { "deadbeef": "", "/deadbeef": "", } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { res, _ := KubePodUIDToTaggerEntityID(in) assert.Equal(t, out, res.String()) }) @@ -83,7 +82,7 @@ func TestKubeIDToTaggerEntityID(t *testing.T) { "deadbeef": "", "/deadbeef": "", } { - t.Run(fmt.Sprintf("case: %s", in), func(t *testing.T) { + t.Run("case: "+in, func(t *testing.T) { res, _ := KubeIDToTaggerEntityID(in) assert.Equal(t, out, res.String()) }) diff --git a/pkg/util/kubernetes/kubelet/kubelet_test.go b/pkg/util/kubernetes/kubelet/kubelet_test.go index c093d458cf3715..60bab508ca1549 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_test.go +++ b/pkg/util/kubernetes/kubelet/kubelet_test.go @@ -12,6 +12,7 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" + "errors" "fmt" "net/http" "net/http/httptest" @@ -30,7 +31,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/create" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - "github.com/DataDog/datadog-agent/pkg/errors" + pkgerrors "github.com/DataDog/datadog-agent/pkg/errors" 
"github.com/DataDog/datadog-agent/pkg/util/log" pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) @@ -173,7 +174,7 @@ func pemBlockForKey(privateKey interface{}) (*pem.Block, error) { return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil default: - return nil, fmt.Errorf("unrecognized format for privateKey") + return nil, errors.New("unrecognized format for privateKey") } } @@ -326,7 +327,7 @@ func (suite *KubeletTestSuite) TestGetLocalPodListWithBrokenKubelet() { pods, err := kubeutil.GetLocalPodList(ctx) require.NotNil(suite.T(), err) require.Len(suite.T(), pods, 0) - require.True(suite.T(), errors.IsRetriable(err)) + require.True(suite.T(), pkgerrors.IsRetriable(err)) } func (suite *KubeletTestSuite) TestGetNodenameStatsSummary() { diff --git a/pkg/util/kubernetes/kubelet/metadata.go b/pkg/util/kubernetes/kubelet/metadata.go index 5a973b94c9f878..7246ef904885dc 100644 --- a/pkg/util/kubernetes/kubelet/metadata.go +++ b/pkg/util/kubernetes/kubelet/metadata.go @@ -10,7 +10,6 @@ package kubelet import ( "context" "errors" - "fmt" "regexp" "github.com/DataDog/datadog-agent/pkg/config/env" @@ -40,7 +39,7 @@ func GetMetadata() (map[string]string, error) { re := regexp.MustCompile("(?:gitVersion|git_version)=\"(.*?)\"") matches := re.FindStringSubmatch(metric) if len(matches) < 1 { - return nil, fmt.Errorf("couldn't find kubelet git version") + return nil, errors.New("couldn't find kubelet git version") } return map[string]string{ "kubelet_version": matches[1], diff --git a/pkg/util/kubernetes/kubelet/podresources.go b/pkg/util/kubernetes/kubelet/podresources.go index 7d6019d39de50a..08f0ed5b518bb8 100644 --- a/pkg/util/kubernetes/kubelet/podresources.go +++ b/pkg/util/kubernetes/kubelet/podresources.go @@ -9,6 +9,7 @@ package kubelet import ( "context" + "errors" "fmt" "runtime" @@ -42,7 +43,7 @@ type ContainerKey struct { func NewPodResourcesClient(config config.Component) (*PodResourcesClient, error) { 
podResourcesSocket := config.GetString("kubernetes_kubelet_podresources_socket") if podResourcesSocket == "" { - return nil, fmt.Errorf("kubernetes_kubelet_podresources_socket is not set") + return nil, errors.New("kubernetes_kubelet_podresources_socket is not set") } socketPrefix := "unix://" diff --git a/pkg/util/log/setup/log_format.go b/pkg/util/log/setup/log_format.go index 374480ab2f812f..99e63786081604 100644 --- a/pkg/util/log/setup/log_format.go +++ b/pkg/util/log/setup/log_format.go @@ -40,7 +40,7 @@ func buildJSONFormat(loggerName LoggerName, cfg pkgconfigmodel.Reader) string { func commonFormatter(loggerName LoggerName, cfg pkgconfigmodel.Reader) func(ctx context.Context, r slog.Record) string { if loggerName == "JMXFETCH" { return func(_ context.Context, r slog.Record) string { - return fmt.Sprintf("%s\n", r.Message) + return r.Message + "\n" } } dateFmt := formatters.Date(cfg.GetBool("log_format_rfc3339")) diff --git a/pkg/util/log/slog/filewriter/file_writer.go b/pkg/util/log/slog/filewriter/file_writer.go index fc0ec3c6ee3e06..ef81c4eb1eff61 100644 --- a/pkg/util/log/slog/filewriter/file_writer.go +++ b/pkg/util/log/slog/filewriter/file_writer.go @@ -359,7 +359,7 @@ func (rws *RollingFileWriterSize) getNewHistoryRollFileName(otherLogFiles []stri latest := otherLogFiles[len(otherLogFiles)-1] v, _ = strconv.Atoi(rws.getFileRollName(latest)) } - return fmt.Sprintf("%d", v+1) + return strconv.Itoa(v + 1) } func (rws *RollingFileWriterSize) getCurrentFileName() string { diff --git a/pkg/util/log/zap/zapcore_test.go b/pkg/util/log/zap/zapcore_test.go index 617dc59d8e7119..30c7fdc7f553a8 100644 --- a/pkg/util/log/zap/zapcore_test.go +++ b/pkg/util/log/zap/zapcore_test.go @@ -8,7 +8,7 @@ package log import ( "bufio" "bytes" - "fmt" + "errors" "testing" "github.com/cihub/seelog" @@ -75,7 +75,7 @@ func TestZapBasicLogging(t *testing.T) { }, { desc: "Error (fields)", - log: func(l *zap.Logger) { l.Error("Fields", zap.Error(fmt.Errorf("an error"))) }, + log: 
func(l *zap.Logger) { l.Error("Fields", zap.Error(errors.New("an error"))) }, level: "debug", message: "[ERROR] | zapcore_test.go | error:an error | Fields", }, diff --git a/pkg/util/lsof/lsof_linux.go b/pkg/util/lsof/lsof_linux.go index 62a287133a039b..424a6e1a600830 100644 --- a/pkg/util/lsof/lsof_linux.go +++ b/pkg/util/lsof/lsof_linux.go @@ -139,7 +139,7 @@ func (ofl *openFilesLister) mmapMetadata() (Files, error) { } func permToString(perms *procfs.ProcMapPermissions) string { - s := "" + var builder strings.Builder for _, perm := range []struct { set bool @@ -153,13 +153,13 @@ func permToString(perms *procfs.ProcMapPermissions) string { {perms.Shared, "s", ""}, } { if perm.set { - s += perm.charSet + builder.WriteString(perm.charSet) } else { - s += perm.charUnset + builder.WriteString(perm.charUnset) } } - return s + return builder.String() } func mmapFD(path string, fileType, cwd string) string { diff --git a/pkg/util/pdhutil/pdhcounter.go b/pkg/util/pdhutil/pdhcounter.go index a92e39fd21394d..90916bc8d1cb36 100644 --- a/pkg/util/pdhutil/pdhcounter.go +++ b/pkg/util/pdhutil/pdhcounter.go @@ -7,6 +7,7 @@ package pdhutil import ( + "errors" "fmt" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -120,7 +121,7 @@ func (counter *pdhCounter) ShouldInit() bool { } var initFailLimit = pkgconfigsetup.Datadog().GetInt("windows_counter_init_failure_limit") if initFailLimit > 0 && counter.initFailCount >= initFailLimit { - counter.initError = fmt.Errorf("counter exceeded the maximum number of failed initialization attempts. This error indicates that the Windows performance counter database may need to be rebuilt") + counter.initError = errors.New("counter exceeded the maximum number of failed initialization attempts. 
This error indicates that the Windows performance counter database may need to be rebuilt") // attempts exceeded return false } @@ -148,7 +149,7 @@ func (counter *pdhCounter) SetInitError(err error) error { func (counter *pdhCounter) Remove() error { if counter.handle == PDH_HCOUNTER(0) { - return fmt.Errorf("counter is not initialized") + return errors.New("counter is not initialized") } pdherror := pfnPdhRemoveCounter(counter.handle) @@ -292,7 +293,7 @@ func (counter *PdhEnglishMultiInstanceCounter) GetAllValues() (values map[string if counter.initError != nil { return nil, counter.initError } - return nil, fmt.Errorf("counter is not initialized") + return nil, errors.New("counter is not initialized") } // fetch data items, err := pfnPdhGetFormattedCounterArray(counter.handle, PDH_FMT_DOUBLE) @@ -326,7 +327,7 @@ func (counter *PdhEnglishSingleInstanceCounter) GetValue() (float64, error) { if counter.initError != nil { return 0, counter.initError } - return 0, fmt.Errorf("counter is not initialized") + return 0, errors.New("counter is not initialized") } // fetch data return pfnPdhGetFormattedCounterValueFloat(counter.handle) @@ -362,7 +363,7 @@ func (query *PdhQuery) Close() { // https://learn.microsoft.com/en-us/windows/win32/api/pdh/nf-pdh-pdhcollectquerydata func PdhCollectQueryData(hQuery PDH_HQUERY) error { if hQuery == PDH_HQUERY(0) { - return fmt.Errorf("invalid query handle") + return errors.New("invalid query handle") } pdherror := pfnPdhCollectQueryData(hQuery) if windows.ERROR_SUCCESS != windows.Errno(pdherror) { diff --git a/pkg/util/pdhutil/pdhmocks_windows.go b/pkg/util/pdhutil/pdhmocks_windows.go index f397c64ff27c0b..4eebf3632db857 100644 --- a/pkg/util/pdhutil/pdhmocks_windows.go +++ b/pkg/util/pdhutil/pdhmocks_windows.go @@ -8,6 +8,7 @@ package pdhutil import ( + "errors" "fmt" "regexp" "strings" @@ -99,7 +100,7 @@ func mockCounterFromHandle(hCounter PDH_HCOUNTER) (mockCounter, error) { } } if !ok { - return ctr, fmt.Errorf("Invalid handle") + 
return ctr, errors.New("Invalid handle") } return ctr, nil diff --git a/pkg/util/port/portlist/netstat_windows.go b/pkg/util/port/portlist/netstat_windows.go index b36f0a12f2429e..cd1c5ed6d2ff37 100644 --- a/pkg/util/port/portlist/netstat_windows.go +++ b/pkg/util/port/portlist/netstat_windows.go @@ -10,6 +10,7 @@ import ( "fmt" "math/bits" "net/netip" + "strconv" "unsafe" "golang.org/x/sys/cpu" @@ -188,7 +189,7 @@ func (m *_MIB_TCP6TABLE_OWNER_MODULE) getRows() []_MIB_TCP6ROW_OWNER_MODULE { func ipport6(addr [16]byte, scope uint32, port uint16) netip.AddrPort { ip := netip.AddrFrom16(addr).Unmap() if scope != 0 { - ip = ip.WithZone(fmt.Sprint(scope)) + ip = ip.WithZone(strconv.Itoa(int(scope))) } return netip.AddrPortFrom(ip, port) } diff --git a/pkg/util/strings/matcher_test.go b/pkg/util/strings/matcher_test.go index 5c71d28454d738..f86a0da4332032 100644 --- a/pkg/util/strings/matcher_test.go +++ b/pkg/util/strings/matcher_test.go @@ -8,6 +8,7 @@ package strings import ( "fmt" "math/rand" + stdstrings "strings" "testing" "github.com/stretchr/testify/assert" @@ -56,12 +57,12 @@ func TestIsStringMatching(t *testing.T) { func randomString(size uint) string { letterBytes := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - var str string + var builder stdstrings.Builder for range size { - str += string(letterBytes[rand.Intn(len(letterBytes))]) + builder.WriteByte(letterBytes[rand.Intn(len(letterBytes))]) } - return str + return builder.String() } func BenchmarkStringsMatcher(b *testing.B) { diff --git a/pkg/util/system/network_windows.go b/pkg/util/system/network_windows.go index 5a3f7a921d8e51..e7a325b94b6696 100644 --- a/pkg/util/system/network_windows.go +++ b/pkg/util/system/network_windows.go @@ -10,7 +10,7 @@ package system import ( "bufio" "bytes" - "fmt" + "errors" "net" "os/exec" "strings" @@ -110,5 +110,5 @@ func defaultGatewayFields() ([]string, error) { return fields, nil } } - return nil, fmt.Errorf("couldn't retrieve default gateway 
information") + return nil, errors.New("couldn't retrieve default gateway information") } diff --git a/pkg/util/testutil/flake/flake.go b/pkg/util/testutil/flake/flake.go index 685c655c514485..b2aa6cb9cba741 100644 --- a/pkg/util/testutil/flake/flake.go +++ b/pkg/util/testutil/flake/flake.go @@ -10,8 +10,8 @@ package flake import ( + "errors" "flag" - "fmt" "os" "path/filepath" "runtime" @@ -58,7 +58,7 @@ func getPackageName() (string, error) { } if fullPackageName == "" { - return "", fmt.Errorf("failed to fetch e2e test function information") + return "", errors.New("failed to fetch e2e test function information") } prefix := filepath.FromSlash("github.com/DataDog/datadog-agent/") diff --git a/pkg/util/trivy/cache.go b/pkg/util/trivy/cache.go index 46944beba73894..d4ec745bc244b4 100644 --- a/pkg/util/trivy/cache.go +++ b/pkg/util/trivy/cache.go @@ -297,7 +297,7 @@ func (c *persistentCache) Clear() error { func (c *persistentCache) removeOldest() error { key, ok := c.removeOldestKeyFromMemory() if !ok { - return fmt.Errorf("in-memory cache is empty") + return errors.New("in-memory cache is empty") } evicted := 0 @@ -331,7 +331,7 @@ func (c *persistentCache) reduceSize(target int) error { } if prev == c.currentCachedObjectTotalSize { // if c.currentCachedObjectTotalSize is not updated by removeOldest then an item is stored in the lrucache without being stored in the local storage - return fmt.Errorf("cache and db are out of sync") + return errors.New("cache and db are out of sync") } } return nil @@ -388,7 +388,7 @@ func (c *persistentCache) Set(key string, value []byte) error { func (c *persistentCache) Get(key string) ([]byte, error) { ok := c.Contains(key) if !ok { - return nil, fmt.Errorf("key not found") + return nil, errors.New("key not found") } res, err := c.db.Get(key) diff --git a/pkg/util/trivy/docker.go b/pkg/util/trivy/docker.go index bd310273f5d694..3b745af6554652 100644 --- a/pkg/util/trivy/docker.go +++ b/pkg/util/trivy/docker.go @@ -9,6 +9,7 @@ 
package trivy import ( "context" + "errors" "fmt" "os" "strings" @@ -49,7 +50,7 @@ func convertDockerImage(ctx context.Context, client client.ImageAPIClient, imgMe f, err := os.CreateTemp("", "fanal-docker-*") if err != nil { - return nil, cleanup, fmt.Errorf("failed to create a temporary file") + return nil, cleanup, errors.New("failed to create a temporary file") } cleanup = func() { diff --git a/pkg/util/winutil/eventlog/api/fake/wevtapi.go b/pkg/util/winutil/eventlog/api/fake/wevtapi.go index 32f442b1fb8839..7a882e5d362fac 100644 --- a/pkg/util/winutil/eventlog/api/fake/wevtapi.go +++ b/pkg/util/winutil/eventlog/api/fake/wevtapi.go @@ -13,6 +13,7 @@ package fakeevtapi import ( "bytes" "encoding/hex" + "errors" "fmt" "regexp" "sort" @@ -38,11 +39,11 @@ func (api *API) EvtSubscribe( Flags uint) (evtapi.EventResultSetHandle, error) { if Query != "" && Query != "*" { - return evtapi.EventResultSetHandle(0), fmt.Errorf("Fake API does not support query syntax") + return evtapi.EventResultSetHandle(0), errors.New("Fake API does not support query syntax") } if Session != evtapi.EventSessionHandle(0) { - return evtapi.EventResultSetHandle(0), fmt.Errorf("Fake API does not support remote sessions") + return evtapi.EventResultSetHandle(0), errors.New("Fake API does not support remote sessions") } // ensure channel exists @@ -79,7 +80,7 @@ func (api *API) EvtSubscribe( } else { // bookmarked event is no longer in the log if Flags&evtapi.EvtSubscribeStrict == evtapi.EvtSubscribeStrict { - return evtapi.EventResultSetHandle(0), fmt.Errorf("bookmark not found and Strict flag set") + return evtapi.EventResultSetHandle(0), errors.New("bookmark not found and Strict flag set") } // MSDN says // If you do not include the EvtSubscribeStrict flag and the bookmarked event does not exist, @@ -117,11 +118,11 @@ func (api *API) EvtQuery( // For the fake implementation, we'll reuse the subscription logic // but return immediately instead of setting up event notification if Query != "" && 
Query != "*" && !strings.HasPrefix(Query, "") { - return evtapi.EventResultSetHandle(0), fmt.Errorf("Fake API does not support query syntax") + return evtapi.EventResultSetHandle(0), errors.New("Fake API does not support query syntax") } if Session != evtapi.EventSessionHandle(0) { - return evtapi.EventResultSetHandle(0), fmt.Errorf("Fake API does not support remote sessions") + return evtapi.EventResultSetHandle(0), errors.New("Fake API does not support remote sessions") } // For multi-channel queries (XML QueryList), just use the first channel for now @@ -444,12 +445,12 @@ func (api *API) EvtCreateBookmark(xml string) (evtapi.EventBookmarkHandle, error re := regexp.MustCompile(`RecordId="(\d+)"`) match := re.FindStringSubmatch(xml) if len(match) != 2 { - return evtapi.EventBookmarkHandle(0), fmt.Errorf("invalid bookmark XML") + return evtapi.EventBookmarkHandle(0), errors.New("invalid bookmark XML") } recordID := match[1] recordIDUint, err := strconv.ParseUint(recordID, 10, 64) if err != nil { - return evtapi.EventBookmarkHandle(0), fmt.Errorf("invalid bookmark XML") + return evtapi.EventBookmarkHandle(0), errors.New("invalid bookmark XML") } b.eventRecordID = uint(recordIDUint) api.addBookmark(&b) @@ -482,13 +483,13 @@ func (api *API) EvtUpdateBookmark(Bookmark evtapi.EventBookmarkHandle, Event evt // not implemented. 
// https://learn.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtcreaterendercontext func (api *API) EvtCreateRenderContext(_ []string, _ uint) (evtapi.EventRenderContextHandle, error) { - return evtapi.EventRenderContextHandle(0), fmt.Errorf("not implemented") + return evtapi.EventRenderContextHandle(0), errors.New("not implemented") } // EvtRenderEventValues is a fake of EvtRender with EvtRenderEventValues // https://learn.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtrender func (api *API) EvtRenderEventValues(_ evtapi.EventRenderContextHandle, _ evtapi.EventRecordHandle) (evtapi.EvtVariantValues, error) { - return nil, fmt.Errorf("not implemented") + return nil, errors.New("not implemented") } // EvtOpenPublisherMetadata fake @@ -497,7 +498,7 @@ func (api *API) EvtRenderEventValues(_ evtapi.EventRenderContextHandle, _ evtapi func (api *API) EvtOpenPublisherMetadata( _ string, _ string) (evtapi.EventPublisherMetadataHandle, error) { - return evtapi.EventPublisherMetadataHandle(0), fmt.Errorf("not implemented") + return evtapi.EventPublisherMetadataHandle(0), errors.New("not implemented") } // EvtFormatMessage fake @@ -509,7 +510,7 @@ func (api *API) EvtFormatMessage( _ uint, _ evtapi.EvtVariantValues, _ uint) (string, error) { - return "", fmt.Errorf("not implemented") + return "", errors.New("not implemented") } // EvtOpenSession fake @@ -522,5 +523,5 @@ func (api *API) EvtOpenSession( _ string, _ uint, ) (evtapi.EventSessionHandle, error) { - return evtapi.EventSessionHandle(0), fmt.Errorf("not implemented") + return evtapi.EventSessionHandle(0), errors.New("not implemented") } diff --git a/pkg/util/winutil/eventlog/api/windows/render.go b/pkg/util/winutil/eventlog/api/windows/render.go index 937d3b414411ca..e1c77f042d397a 100644 --- a/pkg/util/winutil/eventlog/api/windows/render.go +++ b/pkg/util/winutil/eventlog/api/windows/render.go @@ -8,6 +8,7 @@ package winevtapi import ( + "errors" "fmt" "unsafe" @@ -111,7 +112,7 @@ func (v 
*evtVariantValues) SID(index uint) (*windows.SID, error) { // Get a EVT_VARIANT* to an element in the array of structs func (v *evtVariantValues) item(index uint) (*C.EVT_VARIANT, error) { if index >= v.count { - return nil, fmt.Errorf("index out of bounds") + return nil, errors.New("index out of bounds") } // Get a pointer to the structure at index, e.g. &((*EVT_VARIANT)buf)[index] x := (*C.EVT_VARIANT)(unsafe.Add(v.buf, (uintptr)(index)*unsafe.Sizeof(C.EVT_VARIANT{}))) @@ -179,7 +180,7 @@ func evtRenderEventValues(Context evtapi.EventRenderContextHandle, Fragment evta } if BufferUsed == 0 { - return nil, fmt.Errorf("evtRender returned buffer size 0") + return nil, errors.New("evtRender returned buffer size 0") } // Allocate buffer space (BufferUsed is size in bytes) diff --git a/pkg/util/winutil/eventlog/api/windows/wevtapi.go b/pkg/util/winutil/eventlog/api/windows/wevtapi.go index 30a19800e0e25d..c41936c7cdfcce 100644 --- a/pkg/util/winutil/eventlog/api/windows/wevtapi.go +++ b/pkg/util/winutil/eventlog/api/windows/wevtapi.go @@ -9,7 +9,7 @@ package winevtapi import ( - "fmt" + "errors" "unsafe" "github.com/DataDog/datadog-agent/pkg/util/winutil" @@ -137,7 +137,7 @@ func (api *API) EvtNext( var Returned uint32 if len(EventsArray) == 0 { - return nil, fmt.Errorf("input EventsArray is empty") + return nil, errors.New("input EventsArray is empty") } // Fill array @@ -236,7 +236,7 @@ func evtRenderText( Flags uint) ([]uint16, error) { if Flags != evtapi.EvtRenderEventXml && Flags != evtapi.EvtRenderBookmark { - return nil, fmt.Errorf("Invalid Flags") + return nil, errors.New("Invalid Flags") } // Get required buffer size diff --git a/pkg/util/winutil/eventlog/bookmark/bookmark.go b/pkg/util/winutil/eventlog/bookmark/bookmark.go index 8a66fcdbfc0a71..815f7e8c8d372c 100644 --- a/pkg/util/winutil/eventlog/bookmark/bookmark.go +++ b/pkg/util/winutil/eventlog/bookmark/bookmark.go @@ -55,7 +55,7 @@ func New(options ...Option) (Bookmark, error) { if b.bookmarkHandle == 
evtapi.EventBookmarkHandle(0) { if b.eventLogAPI == nil { - return nil, fmt.Errorf("event log API not set") + return nil, errors.New("event log API not set") } // Create a new empty bookmark bookmarkHandle, err := b.eventLogAPI.EvtCreateBookmark("") @@ -80,10 +80,10 @@ func WithWindowsEventLogAPI(api evtapi.API) Option { func FromFile(bookmarkPath string) Option { return func(b *bookmark) error { if b.eventLogAPI == nil { - return fmt.Errorf("event log API not set") + return errors.New("event log API not set") } if b.bookmarkHandle != evtapi.EventBookmarkHandle(0) { - return fmt.Errorf("bookmark handle already initialized") + return errors.New("bookmark handle already initialized") } // Read bookmark from file bookmarkXML, err := os.ReadFile(bookmarkPath) @@ -98,10 +98,10 @@ func FromFile(bookmarkPath string) Option { func FromXML(bookmarkXML string) Option { return func(b *bookmark) error { if b.eventLogAPI == nil { - return fmt.Errorf("event log API not set") + return errors.New("event log API not set") } if b.bookmarkHandle != evtapi.EventBookmarkHandle(0) { - return fmt.Errorf("bookmark handle already initialized") + return errors.New("bookmark handle already initialized") } // Load bookmark XML bookmarkHandle, err := b.eventLogAPI.EvtCreateBookmark(bookmarkXML) @@ -121,10 +121,10 @@ func (b *bookmark) Handle() evtapi.EventBookmarkHandle { // Update the bookmark to the position of the event record for eventHandle func (b *bookmark) Update(eventHandle evtapi.EventRecordHandle) error { if b.eventLogAPI == nil { - return fmt.Errorf("event log API not set") + return errors.New("event log API not set") } if b.bookmarkHandle == evtapi.EventBookmarkHandle(0) { - return fmt.Errorf("bookmark handle is not initialized") + return errors.New("bookmark handle is not initialized") } return b.eventLogAPI.EvtUpdateBookmark(b.bookmarkHandle, eventHandle) } @@ -132,17 +132,17 @@ func (b *bookmark) Update(eventHandle evtapi.EventRecordHandle) error { // Render the bookmark to an 
XML string func (b *bookmark) Render() (string, error) { if b.eventLogAPI == nil { - return "", fmt.Errorf("event log API not set") + return "", errors.New("event log API not set") } if b.bookmarkHandle == evtapi.EventBookmarkHandle(0) { - return "", fmt.Errorf("bookmark handle is not initialized") + return "", errors.New("bookmark handle is not initialized") } // Render bookmark buf, err := b.eventLogAPI.EvtRenderBookmark(b.bookmarkHandle) if err != nil { return "", err } else if len(buf) == 0 { - return "", fmt.Errorf("Bookmark is empty") + return "", errors.New("Bookmark is empty") } // Convert to string diff --git a/pkg/util/winutil/eventlog/example_test.go b/pkg/util/winutil/eventlog/example_test.go index 068fa49eb7382d..a8dff42becb846 100644 --- a/pkg/util/winutil/eventlog/example_test.go +++ b/pkg/util/winutil/eventlog/example_test.go @@ -192,7 +192,7 @@ func TestSubscriptionExample(t *testing.T) { eventSource := "dd-test-source-example" numEvents := uint(10) for _, tiName := range testInterfaceNames { - t.Run(fmt.Sprintf("%sAPI", tiName), func(t *testing.T) { + t.Run(tiName+"API", func(t *testing.T) { if tiName == "Fake" { t.Skip("Fake API does not implement EvtRenderValues") } diff --git a/pkg/util/winutil/eventlog/publishermetadatacache/publishermetadatacache_test.go b/pkg/util/winutil/eventlog/publishermetadatacache/publishermetadatacache_test.go index c2ab1a51f00e85..34a5677757e107 100644 --- a/pkg/util/winutil/eventlog/publishermetadatacache/publishermetadatacache_test.go +++ b/pkg/util/winutil/eventlog/publishermetadatacache/publishermetadatacache_test.go @@ -8,7 +8,7 @@ package publishermetadatacache import ( - "fmt" + "errors" "sync" "testing" "time" @@ -92,7 +92,7 @@ func TestPublisherMetadataCache_Get_Error(t *testing.T) { cache := New(mockAPI) publisherName := "NonExistentPublisher" - expectedErr := fmt.Errorf("publisher not found") + expectedErr := errors.New("publisher not found") mockAPI.On("EvtOpenPublisherMetadata", publisherName, 
"").Return(evtapi.EventPublisherMetadataHandle(0), expectedErr).Once() @@ -121,7 +121,7 @@ func TestPublisherMetadataCache_Get_InvalidHandleExpiration(t *testing.T) { cache.expiration = 10 * time.Millisecond // Short expiration for testing publisherName := "PublisherWithError" - expectedErr := fmt.Errorf("publisher not available") + expectedErr := errors.New("publisher not available") validHandle := evtapi.EventPublisherMetadataHandle(123) // First call returns error @@ -195,7 +195,7 @@ func TestPublisherMetadataCache_FormatMessage_UnexpectedError(t *testing.T) { publisherName := "TestPublisher" eventHandle := evtapi.EventRecordHandle(100) pubHandle := evtapi.EventPublisherMetadataHandle(42) - unexpectedErr := fmt.Errorf("unexpected error") + unexpectedErr := errors.New("unexpected error") mockAPI.On("EvtOpenPublisherMetadata", publisherName, "").Return(pubHandle, nil).Once() mockAPI.On("EvtFormatMessage", pubHandle, eventHandle, uint(0), evtapi.EvtVariantValues(nil), uint(0)). @@ -219,7 +219,7 @@ func TestPublisherMetadataCache_FormatMessage_WithInvalidHandle(t *testing.T) { publisherName := "TestPublisher" eventHandle := evtapi.EventRecordHandle(100) - expectedErr := fmt.Errorf("publisher not found") + expectedErr := errors.New("publisher not found") mockAPI.On("EvtOpenPublisherMetadata", publisherName, "").Return(evtapi.EventPublisherMetadataHandle(0), expectedErr).Once() @@ -281,7 +281,7 @@ func TestPublisherMetadataCache_Flush_SkipsInvalidHandles(t *testing.T) { validHandle := evtapi.EventPublisherMetadataHandle(100) mockAPI.On("EvtOpenPublisherMetadata", validPublisher, "").Return(validHandle, nil).Once() - mockAPI.On("EvtOpenPublisherMetadata", invalidPublisher, "").Return(evtapi.EventPublisherMetadataHandle(0), fmt.Errorf("not found")).Once() + mockAPI.On("EvtOpenPublisherMetadata", invalidPublisher, "").Return(evtapi.EventPublisherMetadataHandle(0), errors.New("not found")).Once() cache.Get(validPublisher) cache.Get(invalidPublisher) @@ -306,7 +306,7 @@ 
func TestPublisherMetadataCache_Concurrency(_ *testing.T) { handle := evtapi.EventPublisherMetadataHandle(100 + i) mockAPI.On("EvtOpenPublisherMetadata", publisher, "").Return(handle, nil).Once() mockAPI.On("EvtFormatMessage", handle, eventHandle, uint(0), evtapi.EvtVariantValues(nil), uint(0)). - Return(fmt.Sprintf("Message from %s", publisher), nil).Times(100 * numGoroutinesPerPublisher) + Return("Message from "+publisher, nil).Times(100 * numGoroutinesPerPublisher) } var wg sync.WaitGroup diff --git a/pkg/util/winutil/eventlog/reporter/reporter.go b/pkg/util/winutil/eventlog/reporter/reporter.go index 75df635939b646..872969aeafeca4 100644 --- a/pkg/util/winutil/eventlog/reporter/reporter.go +++ b/pkg/util/winutil/eventlog/reporter/reporter.go @@ -8,6 +8,7 @@ package evtreporter import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api" @@ -39,7 +40,7 @@ func New(channelName string, api evtapi.API) (Reporter, error) { var r reporter if api == nil { - return nil, fmt.Errorf("event log API is required") + return nil, errors.New("event log API is required") } r.eventLogAPI = api diff --git a/pkg/util/winutil/eventlog/subscription/helpers_test.go b/pkg/util/winutil/eventlog/subscription/helpers_test.go index 7e3a5365458a42..a67f2919c9bb3f 100644 --- a/pkg/util/winutil/eventlog/subscription/helpers_test.go +++ b/pkg/util/winutil/eventlog/subscription/helpers_test.go @@ -8,6 +8,7 @@ package evtsubscribe import ( + "errors" "fmt" "testing" @@ -33,11 +34,11 @@ func ReadNumEvents(t testing.TB, _ eventlog_test.APITester, sub PullSubscription } if count == numEvents { if !assert.Nil(t, events, "events should be nil when count is reached") { - return nil, fmt.Errorf("events should be nil when count is reached") + return nil, errors.New("events should be nil when count is reached") } } else { if !assert.NotNil(t, events, "events should not be nil if count is not reached %v/%v", count, numEvents) { - return nil, fmt.Errorf("events should not be 
nil") + return nil, errors.New("events should not be nil") } } if events != nil { @@ -51,7 +52,7 @@ func ReadNumEvents(t testing.TB, _ eventlog_test.APITester, sub PullSubscription for _, eventRecord := range eventRecords { if !assert.NotEqual(t, evtapi.EventRecordHandle(0), eventRecord.EventRecordHandle, "EventRecordHandle should not be NULL") { - return nil, fmt.Errorf("EventRecordHandle should not be NULL") + return nil, errors.New("EventRecordHandle should not be NULL") } } diff --git a/pkg/util/winutil/eventlog/subscription/subscription.go b/pkg/util/winutil/eventlog/subscription/subscription.go index 5a5bf4a68aebe5..3d114361491f23 100644 --- a/pkg/util/winutil/eventlog/subscription/subscription.go +++ b/pkg/util/winutil/eventlog/subscription/subscription.go @@ -235,7 +235,7 @@ func (q *pullSubscription) Running() bool { func (q *pullSubscription) Start() error { if q.started { - return fmt.Errorf("Query subscription is already started") + return errors.New("Query subscription is already started") } // Initialize bookmark (may load from saver or create new) @@ -346,7 +346,7 @@ func (q *pullSubscription) initializeBookmark() (evtbookmark.Bookmark, error) { func (q *pullSubscription) initializeBookmarkFromLatestEvent() (evtbookmark.Bookmark, error) { if q.bookmarkSaver == nil { // This function doesn't make sense if we're not going to save the bookmark - return nil, fmt.Errorf("bookmark saver not provided") + return nil, errors.New("bookmark saver not provided") } bookmark, err := evtbookmark.FromLatestEvent(q.eventLogAPI, q.channelPath, q.query) @@ -437,7 +437,7 @@ waitLoop: select { case q.eventsChannel <- q.parseEventRecordHandles(eventRecordHandles): case <-q.notifyStop: - q.err = fmt.Errorf("received stop signal") + q.err = errors.New("received stop signal") pkglog.Info(q.err) return } @@ -458,7 +458,7 @@ waitLoop: } } else if dwWait == (windows.WAIT_OBJECT_0 + 1) { // Stop event is set - q.err = fmt.Errorf("received stop signal") + q.err = 
errors.New("received stop signal") pkglog.Info(q.err) return } diff --git a/pkg/util/winutil/eventlog/subscription/subscription_test.go b/pkg/util/winutil/eventlog/subscription/subscription_test.go index 4eecee4b28c557..01c651c4374682 100644 --- a/pkg/util/winutil/eventlog/subscription/subscription_test.go +++ b/pkg/util/winutil/eventlog/subscription/subscription_test.go @@ -8,6 +8,7 @@ package evtsubscribe import ( + "errors" "flag" "fmt" "strings" @@ -43,7 +44,7 @@ func TestInvalidChannel(t *testing.T) { testerNames := eventlog_test.GetEnabledAPITesters() for _, tiName := range testerNames { - t.Run(fmt.Sprintf("%sAPI", tiName), func(t *testing.T) { + t.Run(tiName+"API", func(t *testing.T) { ti := eventlog_test.GetAPITesterByName(tiName, t) sub := NewPullSubscription( "nonexistentchannel", @@ -103,7 +104,7 @@ func getEventHandles(t testing.TB, ti eventlog_test.APITester, sub PullSubscript } count := uint(len(eventRecords)) if !assert.Equal(t, numEvents, count, fmt.Sprintf("Missing events, collected %d/%d events", count, numEvents)) { - return eventRecords, fmt.Errorf("Missing events") + return eventRecords, errors.New("Missing events") } return eventRecords, nil } @@ -112,7 +113,7 @@ func assertNoMoreEvents(t testing.TB, sub PullSubscription) error { select { case <-sub.GetEvents(): assert.Fail(t, "GetEvents should block when there are no more events!") - return fmt.Errorf("GetEvents did not block") + return errors.New("GetEvents did not block") default: return nil } @@ -974,7 +975,7 @@ func (s *GetEventsTestSuite) TestInitializeBookmark_LoadFailureFallbackToNow() { mockSaver := new(evtbookmark.MockSaver) // Simulate load error - mockSaver.On("Load").Return("", fmt.Errorf("simulated load error")).Once() + mockSaver.On("Load").Return("", errors.New("simulated load error")).Once() // After load fails, should fall back to creating bookmark from latest event mockSaver.On("Save", mock.MatchedBy(func(xml string) bool { return strings.Contains(xml, "RecordId=") @@ 
-1162,7 +1163,7 @@ func TestLaunchGetEventsTestSuite(t *testing.T) { testerNames := eventlog_test.GetEnabledAPITesters() for _, tiName := range testerNames { - t.Run(fmt.Sprintf("%sAPI", tiName), func(t *testing.T) { + t.Run(tiName+"API", func(t *testing.T) { var s GetEventsTestSuite s.channelPath = "dd-test-channel-subscription" s.eventSource = "dd-test-source-subscription" diff --git a/pkg/util/winutil/iisconfig/iisconfig.go b/pkg/util/winutil/iisconfig/iisconfig.go index 61af8bb5346456..e0a744dbda44f6 100644 --- a/pkg/util/winutil/iisconfig/iisconfig.go +++ b/pkg/util/winutil/iisconfig/iisconfig.go @@ -8,7 +8,7 @@ package iisconfig import ( "encoding/xml" - "fmt" + "errors" "os" "path/filepath" "strconv" @@ -63,7 +63,7 @@ func NewDynamicIISConfig() (*DynamicIISConfig, error) { // Start config watcher func (iiscfg *DynamicIISConfig) Start() error { if iiscfg == nil { - return fmt.Errorf("Null config") + return errors.New("Null config") } // set the filepath err := iiscfg.watcher.Add(iiscfg.path) diff --git a/pkg/util/xc/stub.go b/pkg/util/xc/stub.go index ce6c89f5e44631..b37c02c24467de 100644 --- a/pkg/util/xc/stub.go +++ b/pkg/util/xc/stub.go @@ -7,10 +7,10 @@ package xc -import "fmt" +import "errors" // GetSystemFreq grabs the system clock frequency // NOP on cross-compiled systems func GetSystemFreq() (int64, error) { - return 0, fmt.Errorf("frequency unavailable") + return 0, errors.New("frequency unavailable") } diff --git a/pkg/version/version.go b/pkg/version/version.go index 58a233fd62ad62..29ae7d07908fb3 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -6,6 +6,7 @@ package version import ( + "errors" "fmt" "regexp" "strconv" @@ -34,7 +35,7 @@ func New(version, commit string) (Version, error) { toks := versionRx.FindStringSubmatch(version) if len(toks) == 0 || toks[0] != version { // if regex didn't match or partially matched, raise an error - return Version{}, fmt.Errorf("Version string has wrong format") + return Version{}, 
errors.New("Version string has wrong format") } // split version info (group 1 in regexp) diff --git a/pkg/windowsdriver/olreader/olreader.go b/pkg/windowsdriver/olreader/olreader.go index 888d0936f03137..f660a2f54f0291 100644 --- a/pkg/windowsdriver/olreader/olreader.go +++ b/pkg/windowsdriver/olreader/olreader.go @@ -16,6 +16,7 @@ package olreader */ import "C" import ( + "errors" "fmt" "sync" "syscall" @@ -103,7 +104,7 @@ func (olr *OverlappedReader) Open(name string) error { //nolint:revive // TODO(WKIT) Fix revive linter func (olr *OverlappedReader) Read() error { if err := olr.createBuffers(); err != nil { - return fmt.Errorf("Failed to create overlapped read buffers") + return errors.New("Failed to create overlapped read buffers") } if err := olr.initiateReads(); err != nil { return err @@ -184,7 +185,7 @@ func (olr *OverlappedReader) initiateReads() error { if buf == nil { // would only happen if `createbuffers` not called, or // cleanbuffers was called. But ensure pointer is valid - return fmt.Errorf("Invalid buffer for read") + return errors.New("Invalid buffer for read") } /* * because this is an overlapped read, this will return ERROR_IO_PENDING diff --git a/pkg/windowsdriver/procmon/procmon.go b/pkg/windowsdriver/procmon/procmon.go index ba9e49ef51e74b..271407d97fd633 100644 --- a/pkg/windowsdriver/procmon/procmon.go +++ b/pkg/windowsdriver/procmon/procmon.go @@ -9,7 +9,7 @@ package procmon import ( - "fmt" + "errors" "unsafe" "github.com/DataDog/datadog-agent/pkg/util/winutil" @@ -76,10 +76,10 @@ var ( func NewWinProcMon(onStart chan *ProcessStartNotification, onStop chan *ProcessStopNotification, onError chan bool, bufsize, numbufs int) (*WinProcmon, error) { if bufsize == 0 { - return nil, fmt.Errorf("invalid buffer size") + return nil, errors.New("invalid buffer size") } if numbufs == 0 { - return nil, fmt.Errorf("invalid number of buffers") + return nil, errors.New("invalid number of buffers") } wp := &WinProcmon{ onStart: onStart, diff --git 
a/test/fakeintake/aggregator/connectionsAggregator.go b/test/fakeintake/aggregator/connectionsAggregator.go index c1af0e32216663..7580822cfcf798 100644 --- a/test/fakeintake/aggregator/connectionsAggregator.go +++ b/test/fakeintake/aggregator/connectionsAggregator.go @@ -6,7 +6,7 @@ package aggregator import ( - "fmt" + "errors" "time" agentmodel "github.com/DataDog/agent-payload/v5/process" @@ -47,7 +47,7 @@ func decodeCollectorConnection(b []byte) (cnx *agentmodel.CollectorConnections, } conns, ok := m.Body.(*agentmodel.CollectorConnections) if !ok { - return nil, fmt.Errorf("not protobuf process.CollectorConnections type") + return nil, errors.New("not protobuf process.CollectorConnections type") } return conns, nil } diff --git a/test/fakeintake/aggregator/containerLifecycleAggregator.go b/test/fakeintake/aggregator/containerLifecycleAggregator.go index e1994e6dd8c59a..7ae2d5ea95c2e3 100644 --- a/test/fakeintake/aggregator/containerLifecycleAggregator.go +++ b/test/fakeintake/aggregator/containerLifecycleAggregator.go @@ -24,9 +24,9 @@ type ContainerLifecyclePayload struct { func (p *ContainerLifecyclePayload) name() string { if container := p.Event.GetContainer(); container != nil { - return fmt.Sprintf("container_id://%s", container.GetContainerID()) + return "container_id://" + container.GetContainerID() } else if pod := p.Event.GetPod(); pod != nil { - return fmt.Sprintf("kubernetes_pod_uid://%s", pod.GetPodUID()) + return "kubernetes_pod_uid://" + pod.GetPodUID() } return "" } diff --git a/test/fakeintake/client/client.go b/test/fakeintake/client/client.go index 0843e22bcce762..dd875744f97da4 100644 --- a/test/fakeintake/client/client.go +++ b/test/fakeintake/client/client.go @@ -422,7 +422,7 @@ func (c *Client) GetLatestFlare() (flare.Flare, error) { } func (c *Client) getFakePayloads(endpoint string) (rawPayloads []api.Payload, err error) { - body, err := c.get(fmt.Sprintf("fakeintake/payloads?endpoint=%s", endpoint)) + body, err := 
c.get("fakeintake/payloads?endpoint=" + endpoint) if err != nil { return nil, err } @@ -437,7 +437,7 @@ func (c *Client) getFakePayloads(endpoint string) (rawPayloads []api.Payload, er // GetServerHealth fetches fakeintake health status and returns an error if // fakeintake is unhealthy func (c *Client) GetServerHealth() error { - resp, err := http.Get(fmt.Sprintf("%s/fakeintake/health", c.fakeIntakeURL)) + resp, err := http.Get(c.fakeIntakeURL + "/fakeintake/health") if err != nil { return err } @@ -450,7 +450,7 @@ func (c *Client) GetServerHealth() error { // ConfigureOverride sets a response override on the fakeintake server func (c *Client) ConfigureOverride(override api.ResponseOverride) error { - route := fmt.Sprintf("%s/fakeintake/configure/override", c.fakeIntakeURL) + route := c.fakeIntakeURL + "/fakeintake/configure/override" buf := new(bytes.Buffer) err := json.NewEncoder(buf).Encode(override) @@ -472,7 +472,7 @@ func (c *Client) ConfigureOverride(override api.ResponseOverride) error { // GetLastAPIKey returns the last apiKey sent with a payload to the intake func (c *Client) GetLastAPIKey() (string, error) { - resp, err := http.Get(fmt.Sprintf("%s/debug/lastAPIKey", c.fakeIntakeURL)) + resp, err := http.Get(c.fakeIntakeURL + "/debug/lastAPIKey") if err != nil { return "", err } @@ -675,7 +675,7 @@ func (c *Client) FlushServerAndResetAggregators() error { } func (c *Client) flushPayloads() error { - resp, err := http.Get(fmt.Sprintf("%s/fakeintake/flushPayloads", c.fakeIntakeURL)) + resp, err := http.Get(c.fakeIntakeURL + "/fakeintake/flushPayloads") if err != nil { return err } diff --git a/test/fakeintake/client/client_integration_test.go b/test/fakeintake/client/client_integration_test.go index 5c97109aa35891..05698daa022db6 100644 --- a/test/fakeintake/client/client_integration_test.go +++ b/test/fakeintake/client/client_integration_test.go @@ -107,7 +107,7 @@ func TestIntegrationClient(t *testing.T) { t.Log("post a test payload to fakeintake and 
check that the override is applied") resp, err := http.Post( - fmt.Sprintf("%s/totoro", fi.URL()), + fi.URL()+"/totoro", "text/plain", strings.NewReader("totoro|5|tag:valid,owner:mei"), ) diff --git a/test/fakeintake/client/client_test.go b/test/fakeintake/client/client_test.go index 03dab418333d55..1b1a7e97134896 100644 --- a/test/fakeintake/client/client_test.go +++ b/test/fakeintake/client/client_test.go @@ -7,6 +7,7 @@ package client import ( _ "embed" + "strconv" "time" "encoding/base64" @@ -83,7 +84,7 @@ func TestClient(t *testing.T) { Data: []byte(r.URL.Path), }, { - Data: []byte(fmt.Sprintf("%d", len(routes))), + Data: []byte(strconv.Itoa(len(routes))), }, { Data: []byte(routes[0]), diff --git a/test/fakeintake/cmd/client/cmd/routestats.go b/test/fakeintake/cmd/client/cmd/routestats.go index e580d941072f64..1931e406520345 100644 --- a/test/fakeintake/cmd/client/cmd/routestats.go +++ b/test/fakeintake/cmd/client/cmd/routestats.go @@ -6,9 +6,9 @@ package cmd import ( - "fmt" "log" "os" + "strconv" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" @@ -30,7 +30,7 @@ func NewRouteStatsCommand(cl **client.Client) (cmd *cobra.Command) { table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Route", "Count"}) for route, count := range stats { - table.Append([]string{route, fmt.Sprintf("%d", count)}) + table.Append([]string{route, strconv.Itoa(count)}) } table.Render() }, diff --git a/test/fakeintake/server/server.go b/test/fakeintake/server/server.go index e99a18af832c26..cb73c509f7e3e3 100644 --- a/test/fakeintake/server/server.go +++ b/test/fakeintake/server/server.go @@ -305,7 +305,7 @@ func (fi *Server) IsRunning() bool { // Stop Gracefully stop the http server func (fi *Server) Stop() error { if !fi.IsRunning() { - return fmt.Errorf("server not running") + return errors.New("server not running") } defer close(fi.shutdown) defer fi.store.Close() @@ -545,7 +545,7 @@ func (fi *Server) handleGetPayloads(w http.ResponseWriter, req 
*http.Request) { } jsonResp, err = json.Marshal(resp) } else { - writeHTTPResponse(w, buildErrorResponse(fmt.Errorf("invalid route parameter"))) + writeHTTPResponse(w, buildErrorResponse(errors.New("invalid route parameter"))) return } diff --git a/test/fakeintake/server/serverstore/db.go b/test/fakeintake/server/serverstore/db.go index 3a9b4253c165b1..5aa1727f9d0220 100644 --- a/test/fakeintake/server/serverstore/db.go +++ b/test/fakeintake/server/serverstore/db.go @@ -7,7 +7,7 @@ package serverstore import ( "database/sql" - "fmt" + "errors" "log" "os" "time" @@ -138,7 +138,7 @@ func (s *sqlStore) SetLastAPIKey(_ string) { } func (s *sqlStore) GetLastAPIKey() (string, error) { - return "", fmt.Errorf("sqlstore does not track last APIKey") + return "", errors.New("sqlstore does not track last APIKey") } // AppendPayload adds a payload to the store and tries parsing and adding a dumped json to the parsed store diff --git a/test/fakeintake/server/serverstore/in_memory.go b/test/fakeintake/server/serverstore/in_memory.go index e8ad89ce98dc32..e6a3bc1bd1274a 100644 --- a/test/fakeintake/server/serverstore/in_memory.go +++ b/test/fakeintake/server/serverstore/in_memory.go @@ -6,7 +6,7 @@ package serverstore import ( - "fmt" + "errors" "log" "sync" "time" @@ -49,7 +49,7 @@ func (s *inMemoryStore) GetLastAPIKey() (string, error) { s.mutex.Lock() defer s.mutex.Unlock() if s.lastAPIKey == "" { - return "", fmt.Errorf("no apiKey sent") + return "", errors.New("no apiKey sent") } return s.lastAPIKey, nil } diff --git a/test/new-e2e/pkg/e2e/suite.go b/test/new-e2e/pkg/e2e/suite.go index 1b82b4c51ed121..32c5ef2a4a6767 100644 --- a/test/new-e2e/pkg/e2e/suite.go +++ b/test/new-e2e/pkg/e2e/suite.go @@ -748,7 +748,7 @@ func (bs *BaseSuite[Env]) TearDownSuite() { } if bs.IsWithinCI() && os.Getenv("REMOTE_STACK_CLEANING") == "true" { - fullStackName := fmt.Sprintf("organization/e2eci/%s", stackName) + fullStackName := "organization/e2eci/" + stackName bs.T().Logf("Remote stack 
cleaning enabled for stack %s", fullStackName) // If we are within CI, we let the stack be destroyed by the stackcleaner-worker service @@ -758,7 +758,7 @@ func (bs *BaseSuite[Env]) TearDownSuite() { if err != nil { bs.T().Logf("WARNING: Unable to destroy stack %s: %s", stackName, out) _, err := bs.datadogClient.PostEvent(&datadog.Event{ - Title: pointer.Ptr(fmt.Sprintf("Unable to destroy stack %s", stackName)), + Title: pointer.Ptr("Unable to destroy stack " + stackName), Text: pointer.Ptr(fmt.Sprintf("Unable to destroy stack %s: %s", stackName, out)), Tags: []string{"test:e2e", "stack:destroy", "stack_name:" + stackName, "service:stackcleaner-worker", "ci.job.name:" + os.Getenv("CI_JOB_NAME"), "ci.job.id:" + os.Getenv("CI_JOB_ID"), "ci.pipeline.id:" + os.Getenv("CI_PIPELINE_ID")}, }) diff --git a/test/new-e2e/pkg/e2e/suite_params.go b/test/new-e2e/pkg/e2e/suite_params.go index 1b6551592ed7a2..a603c2bb78be8d 100644 --- a/test/new-e2e/pkg/e2e/suite_params.go +++ b/test/new-e2e/pkg/e2e/suite_params.go @@ -6,8 +6,6 @@ package e2e import ( - "fmt" - "github.com/pulumi/pulumi/sdk/v3/go/pulumi" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners" @@ -60,7 +58,7 @@ func WithSkipDeleteOnFailure() SuiteOption { func WithProvisioner(provisioner provisioners.Provisioner) SuiteOption { return func(options *suiteParams) { if _, found := options.provisioners[provisioner.ID()]; found { - panic(fmt.Sprintf("Duplicate provider in test Suite: %s", provisioner.ID())) + panic("Duplicate provider in test Suite: " + provisioner.ID()) } if options.provisioners == nil { diff --git a/test/new-e2e/pkg/environments/dockerhost.go b/test/new-e2e/pkg/environments/dockerhost.go index 4410144b3f35bd..41e1b05b776697 100644 --- a/test/new-e2e/pkg/environments/dockerhost.go +++ b/test/new-e2e/pkg/environments/dockerhost.go @@ -39,11 +39,11 @@ var _ common.Diagnosable = (*DockerHost)(nil) func (e *DockerHost) Diagnose(outputDir string) (string, error) { diagnoses := []string{} if 
e.Docker == nil { - return "", fmt.Errorf("Docker component is not initialized") + return "", errors.New("Docker component is not initialized") } // add Agent diagnose if e.Agent == nil { - return "", fmt.Errorf("Agent component is not initialized") + return "", errors.New("Agent component is not initialized") } diagnoses = append(diagnoses, "==== Agent ====") @@ -51,7 +51,7 @@ func (e *DockerHost) Diagnose(outputDir string) (string, error) { if err != nil { return "", fmt.Errorf("failed to generate and download agent flare: %w", err) } - diagnoses = append(diagnoses, fmt.Sprintf("Flare archive downloaded to %s", dstPath)) + diagnoses = append(diagnoses, "Flare archive downloaded to "+dstPath) diagnoses = append(diagnoses, "\n") return strings.Join(diagnoses, "\n"), nil @@ -127,7 +127,7 @@ func (e *DockerHost) generateAndDownloadCoverageForContainer(outputDir string) ( re := regexp.MustCompile(`(?m)Coverage written to (.+)$`) matches := re.FindStringSubmatch(stdout) if len(matches) < 2 { - outStr, errs = updateErrorOutput(target, outStr, errs, fmt.Sprintf("output does not contain the path to the coverage folder, output: %s", stdout)) + outStr, errs = updateErrorOutput(target, outStr, errs, "output does not contain the path to the coverage folder, output: "+stdout) continue } @@ -154,7 +154,7 @@ func (e *DockerHost) generateAndDownloadCoverageForContainer(outputDir string) ( func (e *DockerHost) generateAndDownloadAgentFlare(outputDir string) (string, error) { if e.Agent == nil || e.Docker == nil { - return "", fmt.Errorf("Agent or Docker component is not initialized, cannot generate flare") + return "", errors.New("Agent or Docker component is not initialized, cannot generate flare") } // generate a flare, it will fallback to local flare generation if the running agent cannot be reached // discard error, flare command might return error if there is no intake, but the archive is still generated diff --git a/test/new-e2e/pkg/environments/host.go 
b/test/new-e2e/pkg/environments/host.go index 1d062d0f52c0e8..e00cb85ba0454f 100644 --- a/test/new-e2e/pkg/environments/host.go +++ b/test/new-e2e/pkg/environments/host.go @@ -41,7 +41,7 @@ var _ common.Diagnosable = (*Host)(nil) func (e *Host) Diagnose(outputDir string) (string, error) { diagnoses := []string{} if e.RemoteHost == nil { - return "", fmt.Errorf("RemoteHost component is not initialized") + return "", errors.New("RemoteHost component is not initialized") } // add Agent diagnose if e.Agent != nil { @@ -50,7 +50,7 @@ func (e *Host) Diagnose(outputDir string) (string, error) { if err != nil { return "", fmt.Errorf("failed to generate and download agent flare: %w", err) } - diagnoses = append(diagnoses, fmt.Sprintf("Flare archive downloaded to %s", dstPath)) + diagnoses = append(diagnoses, "Flare archive downloaded to "+dstPath) diagnoses = append(diagnoses, "\n") } @@ -59,7 +59,7 @@ func (e *Host) Diagnose(outputDir string) (string, error) { func generateAndDownloadAgentFlare(agent *components.RemoteHostAgent, host *components.RemoteHost, outputDir string) (string, error) { if agent == nil || host == nil { - return "", fmt.Errorf("Agent or RemoteHost component is not initialized, cannot generate flare") + return "", errors.New("Agent or RemoteHost component is not initialized, cannot generate flare") } // generate a flare, it will fallback to local flare generation if the running agent cannot be reached // todo skip uploading it to backend, requires further changes in agent executor @@ -179,7 +179,7 @@ func (e *Host) Coverage(outputDir string) (string, error) { re := regexp.MustCompile(`(?m)Coverage written to (.+)$`) matches := re.FindStringSubmatch(output) if len(matches) < 2 { - outStr, errs = updateErrorOutput(target, outStr, errs, fmt.Sprintf("output does not contain the path to the coverage folder, output: %s", output)) + outStr, errs = updateErrorOutput(target, outStr, errs, "output does not contain the path to the coverage folder, output: 
"+output) continue } err = e.RemoteHost.GetFolder(matches[1], filepath.Join(outputDir, filepath.Base(matches[1]))) @@ -187,7 +187,7 @@ func (e *Host) Coverage(outputDir string) (string, error) { outStr, errs = updateErrorOutput(target, outStr, errs, err.Error()) continue } - outStr = append(outStr, fmt.Sprintf("Downloaded coverage folder: %s", matches[1])) + outStr = append(outStr, "Downloaded coverage folder: "+matches[1]) } if len(errs) > 0 { diff --git a/test/new-e2e/pkg/environments/host_win.go b/test/new-e2e/pkg/environments/host_win.go index 66e50dedf0ad26..5520fabc09644b 100644 --- a/test/new-e2e/pkg/environments/host_win.go +++ b/test/new-e2e/pkg/environments/host_win.go @@ -6,6 +6,7 @@ package environments import ( + "errors" "fmt" "strings" @@ -36,7 +37,7 @@ func (e *WindowsHost) Init(_ common.Context) error { func (e *WindowsHost) Diagnose(outputDir string) (string, error) { diagnoses := []string{} if e.RemoteHost == nil { - return "", fmt.Errorf("RemoteHost component is not initialized") + return "", errors.New("RemoteHost component is not initialized") } // add Agent diagnose if e.Agent != nil { @@ -45,7 +46,7 @@ func (e *WindowsHost) Diagnose(outputDir string) (string, error) { if err != nil { return "", fmt.Errorf("failed to generate and download agent flare: %w", err) } - diagnoses = append(diagnoses, fmt.Sprintf("Flare archive downloaded to %s", dstPath)) + diagnoses = append(diagnoses, "Flare archive downloaded to "+dstPath) diagnoses = append(diagnoses, "\n") } diff --git a/test/new-e2e/pkg/environments/kubernetes.go b/test/new-e2e/pkg/environments/kubernetes.go index bae28d6c0c156c..9fa1c39417e1f0 100644 --- a/test/new-e2e/pkg/environments/kubernetes.go +++ b/test/new-e2e/pkg/environments/kubernetes.go @@ -36,13 +36,13 @@ func (e *Kubernetes) Diagnose(outputDir string) (string, error) { fmt.Println("Kubernetes Diagnose will be written to", outputDir) diagnoseOutput := []string{"==== Kubernetes Diagnose ===="} if e.KubernetesCluster == nil { - 
return "", fmt.Errorf("KubernetesCluster component is not initialized") + return "", errors.New("KubernetesCluster component is not initialized") } if e.Agent == nil { - return "", fmt.Errorf("Agent component is not initialized") + return "", errors.New("Agent component is not initialized") } if e.KubernetesCluster.KubernetesClient == nil { - return "", fmt.Errorf("KubernetesClient component is not initialized") + return "", errors.New("KubernetesClient component is not initialized") } ctx := context.Background() @@ -65,7 +65,7 @@ func (e *Kubernetes) Diagnose(outputDir string) (string, error) { diagnoseOutput = append(diagnoseOutput, fmt.Sprintf("Failed to generate and download agent flare: %s\n", err.Error())) continue } - diagnoseOutput = append(diagnoseOutput, fmt.Sprintf("Downloaded flare: %s", flarePath)) + diagnoseOutput = append(diagnoseOutput, "Downloaded flare: "+flarePath) } } @@ -87,7 +87,7 @@ func (e *Kubernetes) Diagnose(outputDir string) (string, error) { diagnoseOutput = append(diagnoseOutput, fmt.Sprintf("Failed to generate and download agent flare: %s\n", err.Error())) continue } - diagnoseOutput = append(diagnoseOutput, fmt.Sprintf("Downloaded flare: %s", flarePath)) + diagnoseOutput = append(diagnoseOutput, "Downloaded flare: "+flarePath) } } @@ -109,7 +109,7 @@ func (e *Kubernetes) Diagnose(outputDir string) (string, error) { diagnoseOutput = append(diagnoseOutput, fmt.Sprintf("Failed to generate and download cluster agent flare: %s\n", err.Error())) continue } - diagnoseOutput = append(diagnoseOutput, fmt.Sprintf("Downloaded flare: %s", flarePath)) + diagnoseOutput = append(diagnoseOutput, "Downloaded flare: "+flarePath) } } @@ -235,7 +235,7 @@ func (e *Kubernetes) Coverage(outputDir string) (string, error) { } else if linuxPods, err := e.KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", e.Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), }); err != nil { - 
outStr = append(outStr, fmt.Sprintf("Failed to list linux pods: %s", err.Error())) + outStr = append(outStr, "Failed to list linux pods: "+err.Error()) } else { if len(linuxPods.Items) >= 1 { outStr = append(outStr, "==== Linux pods ====") @@ -256,7 +256,7 @@ func (e *Kubernetes) Coverage(outputDir string) (string, error) { } else if windowsPods, err := e.KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", e.Agent.WindowsNodeAgent.LabelSelectors["app"]).String(), }); err != nil { - outStr = append(outStr, fmt.Sprintf("Failed to list windows pods: %s", err.Error())) + outStr = append(outStr, "Failed to list windows pods: "+err.Error()) } else { if len(windowsPods.Items) >= 1 { outStr = append(outStr, "==== Windows pods ====") @@ -276,7 +276,7 @@ func (e *Kubernetes) Coverage(outputDir string) (string, error) { } else if clusterAgentPods, err := e.KubernetesCluster.Client().CoreV1().Pods("datadog").List(ctx, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", e.Agent.LinuxClusterAgent.LabelSelectors["app"]).String(), }); err != nil { - outStr = append(outStr, fmt.Sprintf("Failed to list cluster agent pods: %s", err.Error())) + outStr = append(outStr, "Failed to list cluster agent pods: "+err.Error()) } else { if len(clusterAgentPods.Items) >= 1 { outStr = append(outStr, "==== Cluster Agent pods ====") @@ -312,15 +312,15 @@ func (e *Kubernetes) generateAndDownloadCoverageForPod(pod v1.Pod, podType podTy re := regexp.MustCompile(`(?m)Coverage written to (.+)$`) matches := re.FindStringSubmatch(output) if len(matches) < 2 { - outStr, errs = updateErrorOutput(target, outStr, errs, fmt.Sprintf("output does not contain the path to the coverage folder, output: %s", output)) + outStr, errs = updateErrorOutput(target, outStr, errs, "output does not contain the path to the coverage folder, output: "+output) continue } - err = 
e.KubernetesCluster.KubernetesClient.DownloadFromPod(pod.Namespace, pod.Name, target.AgentName, matches[1], fmt.Sprintf("%s/coverage", outputDir)) + err = e.KubernetesCluster.KubernetesClient.DownloadFromPod(pod.Namespace, pod.Name, target.AgentName, matches[1], outputDir+"/coverage") if err != nil { outStr, errs = updateErrorOutput(target, outStr, errs, err.Error()) continue } - outStr = append(outStr, fmt.Sprintf("Downloaded coverage folder: %s", matches[1])) + outStr = append(outStr, "Downloaded coverage folder: "+matches[1]) } if len(errs) > 0 { return strings.Join(outStr, "\n"), errors.Join(errs...) diff --git a/test/new-e2e/pkg/provisioners/aws/host/host.go b/test/new-e2e/pkg/provisioners/aws/host/host.go index eae05996472cf1..9274a743f1471d 100644 --- a/test/new-e2e/pkg/provisioners/aws/host/host.go +++ b/test/new-e2e/pkg/provisioners/aws/host/host.go @@ -264,7 +264,7 @@ func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error // todo: add agent once updater installs agent on bootstrap env.Agent = nil } else if params.agentOptions != nil { - agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())})) + agentOptions := append(params.agentOptions, agentparams.WithTags([]string{"stackid:" + ctx.Stack()})) agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...) 
if err != nil { return err diff --git a/test/new-e2e/pkg/provisioners/aws/host/windows/host.go b/test/new-e2e/pkg/provisioners/aws/host/windows/host.go index d8de8d88f5df83..0bae19394d7cce 100644 --- a/test/new-e2e/pkg/provisioners/aws/host/windows/host.go +++ b/test/new-e2e/pkg/provisioners/aws/host/windows/host.go @@ -222,7 +222,7 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, awsEnv aws.Environm } if params.agentOptions != nil { - agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())})) + agentOptions := append(params.agentOptions, agentparams.WithTags([]string{"stackid:" + ctx.Stack()})) agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...) if err != nil { return err diff --git a/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go b/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go index 97c14e8b28b0d1..1a5a69d77a2c08 100644 --- a/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go +++ b/test/new-e2e/pkg/provisioners/aws/kubernetes/eks.go @@ -8,7 +8,6 @@ package awskubernetes import ( "context" - "fmt" "github.com/DataDog/datadog-agent/test/e2e-framework/common/utils" "github.com/DataDog/datadog-agent/test/e2e-framework/components/datadog/agent" @@ -43,7 +42,7 @@ func eksDiagnoseFunc(ctx context.Context, stackName string) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("Dumping EKS cluster state:\n%s", dumpResult), nil + return "Dumping EKS cluster state:\n" + dumpResult, nil } // EKSProvisioner creates a new provisioner diff --git a/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go b/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go index 063a88d8607fc2..c63523533e0319 100644 --- a/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go +++ b/test/new-e2e/pkg/provisioners/aws/kubernetes/kind.go @@ -9,7 +9,6 @@ package awskubernetes import ( "context" _ "embed" - "fmt" "strings" 
"github.com/DataDog/datadog-agent/test/e2e-framework/components/datadog/apps/etcd" @@ -84,7 +83,7 @@ func KindDiagnoseFunc(ctx context.Context, stackName string) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("Dumping Kind cluster state:\n%s", dumpResult), nil + return "Dumping Kind cluster state:\n" + dumpResult, nil } // KindProvisioner creates a new provisioner diff --git a/test/new-e2e/pkg/provisioners/aws/kubernetes/kubernetes_dump.go b/test/new-e2e/pkg/provisioners/aws/kubernetes/kubernetes_dump.go index 68ca46cd940f7d..965e20ab8dfe42 100644 --- a/test/new-e2e/pkg/provisioners/aws/kubernetes/kubernetes_dump.go +++ b/test/new-e2e/pkg/provisioners/aws/kubernetes/kubernetes_dump.go @@ -9,6 +9,7 @@ import ( "bytes" "context" "encoding/base64" + "errors" "fmt" "io" "net" @@ -140,7 +141,7 @@ func dumpKindClusterState(ctx context.Context, name string) (ret string, err err instanceIP := instancesDescription.Reservations[0].Instances[0].PrivateIpAddress if instanceIP == nil { - return ret, fmt.Errorf("failed to get private IP of instance") + return ret, errors.New("failed to get private IP of instance") } auth := []ssh.AuthMethod{} diff --git a/test/new-e2e/pkg/provisioners/pulumi_provisioner.go b/test/new-e2e/pkg/provisioners/pulumi_provisioner.go index ab9e6e69ca03c7..503275b187fa1a 100644 --- a/test/new-e2e/pkg/provisioners/pulumi_provisioner.go +++ b/test/new-e2e/pkg/provisioners/pulumi_provisioner.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "reflect" + "strings" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" @@ -110,11 +111,11 @@ func (pp *PulumiProvisioner[Env]) ProvisionEnv(ctx context.Context, stackName st } func dumpRawResources(resources RawResources) string { - var res string + var builder strings.Builder for key, value := range resources { - res += fmt.Sprintf("%s: %s\n", key, value) + fmt.Fprintf(&builder, "%s: %s\n", key, value) } - return res + return builder.String() } // Diagnose runs the diagnose function if it is set diagnoseFunc 
diff --git a/test/new-e2e/pkg/runner/ci_profile.go b/test/new-e2e/pkg/runner/ci_profile.go index ece3b5514c0d12..87932ffe13e83f 100644 --- a/test/new-e2e/pkg/runner/ci_profile.go +++ b/test/new-e2e/pkg/runner/ci_profile.go @@ -68,7 +68,7 @@ func NewCIProfile() (Profile, error) { } if initOnly || preInitialized { - uniqueID = fmt.Sprintf("init-%s", os.Getenv("CI_PIPELINE_ID")) // We use pipeline ID for init only and pre-initialized jobs, to be able to share state + uniqueID = "init-" + os.Getenv("CI_PIPELINE_ID") // We use pipeline ID for init only and pre-initialized jobs, to be able to share state } // get environments from store diff --git a/test/new-e2e/pkg/runner/local_profile.go b/test/new-e2e/pkg/runner/local_profile.go index 63be7464fb18af..adfb8097909609 100644 --- a/test/new-e2e/pkg/runner/local_profile.go +++ b/test/new-e2e/pkg/runner/local_profile.go @@ -6,6 +6,7 @@ package runner import ( + "errors" "fmt" "os" "os/user" @@ -64,7 +65,7 @@ func getLocalOutputDir() string { func getConfigFilePath() (string, error) { homeDir, err := os.UserHomeDir() if err != nil { - return "", fmt.Errorf("unable to get the home dir") + return "", errors.New("unable to get the home dir") } configPath := path.Join(homeDir, ".test_infra_config.yaml") diff --git a/test/new-e2e/pkg/runner/profile.go b/test/new-e2e/pkg/runner/profile.go index d6be02431777ab..f7cc1aa1bbe73e 100644 --- a/test/new-e2e/pkg/runner/profile.go +++ b/test/new-e2e/pkg/runner/profile.go @@ -191,7 +191,7 @@ func (p baseProfile) CreateOutputSubDir(subdirectory string) (string, error) { } // Create final output directory // Use MkdirTemp to avoid name collisions between parallel runs - outputDir, err := os.MkdirTemp(parentDir, fmt.Sprintf("%s_*", finalDir)) + outputDir, err := os.MkdirTemp(parentDir, finalDir+"_*") if err != nil { return "", err } diff --git a/test/new-e2e/pkg/utils/e2e/client/agent_client.go b/test/new-e2e/pkg/utils/e2e/client/agent_client.go index fe9e0fc9927b83..f308f68765d8ac 100644 --- 
a/test/new-e2e/pkg/utils/e2e/client/agent_client.go +++ b/test/new-e2e/pkg/utils/e2e/client/agent_client.go @@ -175,7 +175,7 @@ func makeStatusEndpointRequest(params *agentclientparams.Params, host *Host, url return nil, true, err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", params.AuthToken)) + req.Header.Set("Authorization", "Bearer "+params.AuthToken) return req, true, nil } @@ -196,10 +196,10 @@ func ensureAuthToken(params *agentclientparams.Params, host *Host) error { func fetchAuthTokenCommand(authTokenPath string, osFamily osComp.Family) string { if osFamily == osComp.WindowsFamily { - return fmt.Sprintf("Get-Content -Raw -Path %s", authTokenPath) + return "Get-Content -Raw -Path " + authTokenPath } - return fmt.Sprintf("sudo cat %s", authTokenPath) + return "sudo cat " + authTokenPath } func waitForReadyTimeout(commandRunner *agentCommandRunner, timeout time.Duration) error { diff --git a/test/new-e2e/pkg/utils/e2e/client/ecs/session-manager-plugin.go b/test/new-e2e/pkg/utils/e2e/client/ecs/session-manager-plugin.go index 02348e0f587b5c..a7546c9d6d24d0 100644 --- a/test/new-e2e/pkg/utils/e2e/client/ecs/session-manager-plugin.go +++ b/test/new-e2e/pkg/utils/e2e/client/ecs/session-manager-plugin.go @@ -22,6 +22,7 @@ package ecs import ( + "errors" "fmt" "math/rand" @@ -94,7 +95,7 @@ func execute(s *session.Session, logger log.T) (string, error) { return string(payload), nil } case <-stopChannel: - return "", fmt.Errorf("Failed to initialize session") + return "", errors.New("Failed to initialize session") } return "", nil } diff --git a/test/new-e2e/pkg/utils/e2e/client/host.go b/test/new-e2e/pkg/utils/e2e/client/host.go index 25a91a9fa77cfe..df136c9ccea45f 100644 --- a/test/new-e2e/pkg/utils/e2e/client/host.go +++ b/test/new-e2e/pkg/utils/e2e/client/host.go @@ -266,7 +266,7 @@ func (h *Host) FileExists(path string) (bool, error) { func (h *Host) EnsureFileIsReadable(path string) error { // ensure the file is readable on the remote host if 
h.osFamily != oscomp.WindowsFamily { - _, err := h.Execute(fmt.Sprintf("sudo chmod +r %s", path)) + _, err := h.Execute("sudo chmod +r " + path) if err != nil { return fmt.Errorf("failed to make file readable: %w", err) } @@ -378,13 +378,13 @@ func (h *Host) FindFiles(name string) ([]string, error) { h.context.T().Logf("Finding files with name %s", name) switch h.osFamily { case oscomp.WindowsFamily: - out, err := h.Execute(fmt.Sprintf("Get-ChildItem -Path C:\\ -Filter %s", name)) + out, err := h.Execute("Get-ChildItem -Path C:\\ -Filter " + name) if err != nil { return nil, err } return strings.Split(out, "\n"), nil case oscomp.LinuxFamily: - out, err := h.Execute(fmt.Sprintf("sudo find / -name %s", name)) + out, err := h.Execute("sudo find / -name " + name) if err != nil { return nil, err } @@ -481,7 +481,7 @@ func (h *Host) GetAgentConfigFolder() (string, error) { if err != nil { return out, err } - return fmt.Sprintf("%s\\Datadog", strings.TrimSpace(out)), nil + return strings.TrimSpace(out) + "\\Datadog", nil case oscomp.LinuxFamily: return "/etc/datadog-agent", nil case oscomp.MacOSFamily: @@ -636,7 +636,7 @@ func buildCommandFactory(osFamily oscomp.Family) buildCommandFn { } func buildCommandOnWindows(command string, envVar EnvVar) string { - cmd := "" + var builder strings.Builder // Set $ErrorActionPreference to 'Stop' to cause PowerShell to stop on an error instead // of the default 'Continue' behavior. 
@@ -653,10 +653,10 @@ func buildCommandOnWindows(command string, envVar EnvVar) string { // // To ignore errors, prefix command with $ErrorActionPreference='Continue' or use -ErrorAction Continue // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_preference_variables#erroractionpreference - cmd += "$ErrorActionPreference='Stop'; " + builder.WriteString("$ErrorActionPreference='Stop'; ") for envName, envValue := range envVar { - cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envValue) + fmt.Fprintf(&builder, "$env:%s='%s'; ", envName, envValue) } // By default, powershell will just exit with 0 or 1, so we call exit to preserve // the exit code of the command provided by the caller. @@ -665,7 +665,7 @@ func buildCommandOnWindows(command string, envVar EnvVar) string { // // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_automatic_variables?#lastexitcode // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_powershell_exe?#-command - cmd += fmt.Sprintf("$LASTEXITCODE=0; %s; if (-not $?) { exit $LASTEXITCODE }", command) + fmt.Fprintf(&builder, "$LASTEXITCODE=0; %s; if (-not $?) { exit $LASTEXITCODE }", command) // NOTE: Do not add more commands after the command provided by the caller. // // `$ErrorActionPreference`='Stop' only applies to PowerShell commands, not to @@ -676,16 +676,16 @@ func buildCommandOnWindows(command string, envVar EnvVar) string { // caller, we will need to find a way to ensure that the exit code of the command // provided by the caller is preserved. 
- return cmd + return builder.String() } func buildCommandOnLinuxAndMacOS(command string, envVar EnvVar) string { - cmd := "" + var builder strings.Builder for envName, envValue := range envVar { - cmd += fmt.Sprintf("%s='%s' ", envName, envValue) + fmt.Fprintf(&builder, "%s='%s' ", envName, envValue) } - cmd += command - return cmd + builder.WriteString(command) + return builder.String() } // convertToForwardSlashOnWindows replaces backslashes in the path with forward slashes for Windows remote hosts. diff --git a/test/new-e2e/pkg/utils/e2e/client/host_cache.go b/test/new-e2e/pkg/utils/e2e/client/host_cache.go index 76f0a9ddc5fdce..8fa2790d6b35c9 100644 --- a/test/new-e2e/pkg/utils/e2e/client/host_cache.go +++ b/test/new-e2e/pkg/utils/e2e/client/host_cache.go @@ -6,6 +6,7 @@ package client import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/test/e2e-framework/components" @@ -19,7 +20,7 @@ const ( type unimplementedHostCache struct{} func (c *unimplementedHostCache) Get(_ string, _ string) error { - return fmt.Errorf("not implemented") + return errors.New("not implemented") } func hostArtifactsClientFactory(sshExecutor *sshExecutor, osFlavor oscomp.Flavor, cloudProvider components.CloudProviderIdentifier, architecture oscomp.Architecture) HostArtifactClient { @@ -160,7 +161,7 @@ type aptPkgManager struct { } func (c *aptPkgManager) install(pkgName string) error { - _, err := c.sshExecutor.Execute(fmt.Sprintf("sudo apt-get install -y %s", pkgName)) + _, err := c.sshExecutor.Execute("sudo apt-get install -y " + pkgName) return err } @@ -169,7 +170,7 @@ type yumPkgManager struct { } func (c *yumPkgManager) install(pkgName string) error { - _, err := c.sshExecutor.Execute(fmt.Sprintf("sudo yum install -y %s", pkgName)) + _, err := c.sshExecutor.Execute("sudo yum install -y " + pkgName) return err } @@ -178,6 +179,6 @@ type zypperPkgManager struct { } func (c *zypperPkgManager) install(pkgName string) error { - _, err := c.sshExecutor.Execute(fmt.Sprintf("sudo 
zypper install -y %s", pkgName)) + _, err := c.sshExecutor.Execute("sudo zypper install -y " + pkgName) return err } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager.go b/test/new-e2e/pkg/utils/infra/stack_manager.go index bc43090f650c90..3d45c67d4fd8b7 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager.go @@ -379,14 +379,14 @@ func (sm *StackManager) destroyStack(ctx context.Context, stackID string, stack _, destroyErr = stack.Destroy(destroyContext, progressStreamsDestroyOption, optdestroy.DebugLogging(loggingOptions)) cancel() if destroyErr == nil { - sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack destroy", stackID), "", []string{"operation:destroy", "result:ok", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", downCount)}) + sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack destroy", stackID), "", []string{"operation:destroy", "result:ok", "stack:" + stack.Name(), fmt.Sprintf("retries:%d", downCount)}) return nil } // handle timeout contextCauseErr := context.Cause(destroyContext) if errors.Is(contextCauseErr, context.DeadlineExceeded) { - sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack destroy", stackID), "", []string{"operation:destroy", fmt.Sprintf("stack:%s", stack.Name())}) + sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack destroy", stackID), "", []string{"operation:destroy", "stack:" + stack.Name()}) fmt.Fprint(logger, "Timeout during stack destroy, trying to cancel stack's operation\n") err := cancelStack(stack, defaultStackCancelTimeout) if err != nil { @@ -395,7 +395,7 @@ func (sm *StackManager) destroyStack(ctx context.Context, stackID string, stack } } - sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack destroy", stackID), destroyErr.Error(), []string{"operation:destroy", 
"result:fail", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", downCount)}) + sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack destroy", stackID), destroyErr.Error(), []string{"operation:destroy", "result:fail", "stack:" + stack.Name(), fmt.Sprintf("retries:%d", downCount)}) if downCount > stackDestroyMaxRetry { fmt.Fprintf(logger, "Giving up on error during stack destroy: %v\n", destroyErr) @@ -421,19 +421,19 @@ func (sm *StackManager) removeStack(ctx context.Context, stackID string, stack * err = stack.Workspace().RemoveStack(removeContext, stack.Name()) cancel() if err == nil { - sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack remove", stackID), "", []string{"operation:remove", "result:ok", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", removeCount)}) + sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack remove", stackID), "", []string{"operation:remove", "result:ok", "stack:" + stack.Name(), fmt.Sprintf("retries:%d", removeCount)}) return nil } // handle timeout contextCauseErr := context.Cause(removeContext) if errors.Is(contextCauseErr, context.DeadlineExceeded) { - sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack remove", stackID), "", []string{"operation:remove", fmt.Sprintf("stack:%s", stack.Name())}) + sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack remove", stackID), "", []string{"operation:remove", "stack:" + stack.Name()}) fmt.Fprint(logger, "Timeout during stack remove\n") continue } - sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack remove", stackID), err.Error(), []string{"operation:remove", "result:fail", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", removeCount)}) + sendEventToDatadog(ddEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack remove", stackID), 
err.Error(), []string{"operation:remove", "result:fail", "stack:" + stack.Name(), fmt.Sprintf("retries:%d", removeCount)}) if removeCount > stackRemoveMaxRetry { fmt.Fprintf(logger, "[WARNING] Giving up on error during stack remove: %v\nThe stack resources are destroyed, but we failed removing the stack state.\n", err) @@ -508,14 +508,14 @@ func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pu // early return on success if upError == nil { - sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", name), "", []string{"operation:up", "result:ok", fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", upCount)}) + sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", name), "", []string{"operation:up", "result:ok", "stack:" + stack.Name(), fmt.Sprintf("retries:%d", upCount)}) break } // handle timeout contextCauseErr := context.Cause(upCtx) if errors.Is(contextCauseErr, context.DeadlineExceeded) { - sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack up", name), "", []string{"operation:up", fmt.Sprintf("stack:%s", stack.Name())}) + sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : timeout on Pulumi stack up", name), "", []string{"operation:up", "stack:" + stack.Name()}) fmt.Fprint(logger, "Timeout during stack up, trying to cancel stack's operation\n") err = cancelStack(stack, params.CancelTimeout) if err != nil { @@ -525,7 +525,7 @@ func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pu } retryStrategy, changedOpts := sm.GetRetryStrategyFrom(upError, upCount) - sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", name), upError.Error(), []string{"operation:up", "result:fail", fmt.Sprintf("retry:%s", retryStrategy), fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", 
upCount)}) + sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", name), upError.Error(), []string{"operation:up", "result:fail", fmt.Sprintf("retry:%s", retryStrategy), "stack:" + stack.Name(), fmt.Sprintf("retries:%d", upCount)}) switch retryStrategy { case ReUp: diff --git a/test/new-e2e/pkg/utils/infra/stack_manager_test.go b/test/new-e2e/pkg/utils/infra/stack_manager_test.go index d38b7729646fcf..c95c703865ed73 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager_test.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager_test.go @@ -247,7 +247,7 @@ func TestStackManager(t *testing.T) { if stackUpCounter > 3 { return nil } - return fmt.Errorf("error during container init: error setting cgroup config for procHooks process: unable to freeze: unknown") + return errors.New("error during container init: error setting cgroup config for procHooks process: unable to freeze: unknown") }, WithLogWriter(mockWriter), WithDatadogEventSender(mockDatadogEventSender), @@ -280,7 +280,7 @@ func TestStackManager(t *testing.T) { if stackUpCounter > 3 { return nil } - return fmt.Errorf("random error") + return errors.New("random error") }, WithLogWriter(mockWriter), WithDatadogEventSender(mockDatadogEventSender), diff --git a/test/new-e2e/system-probe/connector/main.go b/test/new-e2e/system-probe/connector/main.go index ffa9e1cd3aa3fd..3bbecd85159414 100644 --- a/test/new-e2e/system-probe/connector/main.go +++ b/test/new-e2e/system-probe/connector/main.go @@ -197,11 +197,11 @@ func run() (err error) { func buildMetric(cinfo connectorInfo, failType, result string) datadogV2.MetricPayload { tags := []string{ - fmt.Sprintf("result:%s", result), - fmt.Sprintf("connection_type:%s", cinfo.connectorType), + "result:" + result, + "connection_type:" + cinfo.connectorType, } if failType != "" { - tags = append(tags, fmt.Sprintf("error:%s", failType)) + tags = append(tags, "error:"+failType) } return datadogV2.MetricPayload{ Series: 
[]datadogV2.MetricSeries{ diff --git a/test/new-e2e/system-probe/connector/sshtools/communicator.go b/test/new-e2e/system-probe/connector/sshtools/communicator.go index ce99fb8629f914..56391766cf97cc 100644 --- a/test/new-e2e/system-probe/connector/sshtools/communicator.go +++ b/test/new-e2e/system-probe/connector/sshtools/communicator.go @@ -11,6 +11,7 @@ import ( "net" "os" "path/filepath" + "strconv" "strings" "golang.org/x/crypto/ssh" @@ -68,7 +69,7 @@ func (c *Communicator) Connect(ctx context.Context) (err error) { c.reset() - client, err := c.dial(ctx, "tcp", net.JoinHostPort(c.host, fmt.Sprint(c.config.Port)), &c.config.ClientConfig) + client, err := c.dial(ctx, "tcp", net.JoinHostPort(c.host, strconv.Itoa(c.config.Port)), &c.config.ClientConfig) if err != nil { return fmt.Errorf("ssh: dial failed: %w", err) } diff --git a/test/new-e2e/system-probe/errors.go b/test/new-e2e/system-probe/errors.go index 297f80bb0184a5..79b20548f7a996 100644 --- a/test/new-e2e/system-probe/errors.go +++ b/test/new-e2e/system-probe/errors.go @@ -9,11 +9,11 @@ package systemprobe import ( - "fmt" "log" "os" "path" "regexp" + "strconv" "strings" "time" @@ -131,7 +131,7 @@ type retryHandler struct { func errorMetric(errType string) datadogV2.MetricPayload { tags := []string{ - fmt.Sprintf("error:%s", errType), + "error:" + errType, } return datadogV2.MetricPayload{ Series: []datadogV2.MetricSeries{ @@ -219,7 +219,7 @@ func storeNumberOfRetriesForCITags(retries int) error { } defer f.Close() - _, err = f.WriteString(fmt.Sprintf("%d", retries)) + _, err = f.WriteString(strconv.Itoa(retries)) return err } diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index 84ca4ff4a8c5aa..e3417ec4e49604 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ b/test/new-e2e/system-probe/system-probe-test-env.go @@ -106,7 +106,7 @@ func outputsToFile(output auto.OutputMap) error { } switch v := value.Value.(type) { case 
string: - if _, err := f.WriteString(fmt.Sprintf("%s\n", v)); err != nil { + if _, err := f.WriteString(v + "\n"); err != nil { return fmt.Errorf("failed to write string to file %q: %v", stackOutputs, err) } default: @@ -175,7 +175,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* apiKey := getEnv("DD_API_KEY", "") if opts.RunAgent && apiKey == "" { - return nil, fmt.Errorf("No API Key for datadog-agent provided") + return nil, errors.New("No API Key for datadog-agent provided") } ciJob := getEnv("CI_JOB_ID", "") @@ -310,22 +310,22 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* "source:pulumi", "repository:datadog/datadog-agent", "team:ebpf-platform", - fmt.Sprintf("vm.name:%s", pulumiError.vmName), - fmt.Sprintf("vm.arch:%s", pulumiError.arch), - fmt.Sprintf("vm.command:%s", pulumiError.vmCommand), + "vm.name:" + pulumiError.vmName, + "vm.arch:" + pulumiError.arch, + "vm.command:" + pulumiError.vmCommand, }, } if ciJob != "" { - event.Tags = append(event.Tags, fmt.Sprintf("ci.job.id:%s", ciJob)) + event.Tags = append(event.Tags, "ci.job.id:"+ciJob) } if ciPipeline != "" { - event.Tags = append(event.Tags, fmt.Sprintf("ci.pipeline.id:%s", ciPipeline)) + event.Tags = append(event.Tags, "ci.pipeline.id:"+ciPipeline) } if ciBranch != "" { - event.Tags = append(event.Tags, fmt.Sprintf("ci.branch:%s", ciBranch)) + event.Tags = append(event.Tags, "ci.branch:"+ciBranch) } if err = metric.SubmitExecutionEvent(event); err != nil { diff --git a/test/new-e2e/system-probe/test-runner/main.go b/test/new-e2e/system-probe/test-runner/main.go index 46b080d67e40de..d632448e0b68c3 100644 --- a/test/new-e2e/system-probe/test-runner/main.go +++ b/test/new-e2e/system-probe/test-runner/main.go @@ -96,7 +96,7 @@ func getEBPFBuildDir() (string, error) { arch = "arm64" } - return fmt.Sprintf("pkg/ebpf/bytecode/build/%s", arch), nil + return "pkg/ebpf/bytecode/build/" + arch, nil } func glob(dir, filePattern string, 
filterFn func(path string) bool) ([]string, error) { @@ -273,8 +273,8 @@ func testPass(testConfig *testConfig, props map[string]string) error { return fmt.Errorf("could not get relative path for %s: %w", testsuite, err) } junitfilePrefix := strings.ReplaceAll(pkg, "/", "-") - xmlpath := filepath.Join(xmlDir, fmt.Sprintf("%s.xml", junitfilePrefix)) - jsonpath := filepath.Join(jsonDir, fmt.Sprintf("%s.json", junitfilePrefix)) + xmlpath := filepath.Join(xmlDir, junitfilePrefix+".xml") + jsonpath := filepath.Join(jsonDir, junitfilePrefix+".json") testsuiteArgs := []string{testsuite} if testContainer != nil { diff --git a/test/new-e2e/system-probe/test-runner/testcontainer.go b/test/new-e2e/system-probe/test-runner/testcontainer.go index e637b1a40cf379..1d128864c25a90 100644 --- a/test/new-e2e/system-probe/test-runner/testcontainer.go +++ b/test/new-e2e/system-probe/test-runner/testcontainer.go @@ -58,7 +58,7 @@ func (ctc *testContainer) start() error { "/etc/group:/etc/group", "/opt/datadog-agent/embedded/:/opt/datadog-agent/embedded/", "/opt/kmt-ramfs:/opt/kmt-ramfs", - fmt.Sprintf("%s:/opt/bpf", ctc.bpfDir), + ctc.bpfDir + ":/opt/bpf", } for _, mount := range mounts { args = append(args, "-v", mount) diff --git a/test/new-e2e/system-probe/vm-metrics/vm-metrics.go b/test/new-e2e/system-probe/vm-metrics/vm-metrics.go index 9d2d0edae409a1..25796b33eb39b7 100644 --- a/test/new-e2e/system-probe/vm-metrics/vm-metrics.go +++ b/test/new-e2e/system-probe/vm-metrics/vm-metrics.go @@ -114,7 +114,7 @@ func (d *domainMetrics) collectDomainMemoryStatInfo(l libvirtInterface) error { return fmt.Errorf("failed to get memory stats: %w", err) } - tags := []string{fmt.Sprintf("os:%s", d.osID)} + tags := []string{"os:" + d.osID} for _, stat := range memStats { if statString, ok := memStatTagToName[libvirt.DomainMemoryStatTags(stat.Tag)]; ok { if stat.Tag == int32(libvirt.DomainMemoryStatMajorFault) { diff --git a/test/new-e2e/tests/agent-configuration/api/api_test.go 
b/test/new-e2e/tests/agent-configuration/api/api_test.go index 480dc0be26d669..5739886f22139f 100644 --- a/test/new-e2e/tests/agent-configuration/api/api_test.go +++ b/test/new-e2e/tests/agent-configuration/api/api_test.go @@ -68,7 +68,7 @@ func (endpointInfo *agentEndpointInfo) httpRequest(authtoken string) (*http.Requ return nil, err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", authtoken)) + req.Header.Set("Authorization", "Bearer "+authtoken) return req, nil } diff --git a/test/new-e2e/tests/agent-log-pipelines/linux-log/file-tailing/file_tailing_test.go b/test/new-e2e/tests/agent-log-pipelines/linux-log/file-tailing/file_tailing_test.go index b4c3c3f711fdfa..700944d33d98a2 100644 --- a/test/new-e2e/tests/agent-log-pipelines/linux-log/file-tailing/file_tailing_test.go +++ b/test/new-e2e/tests/agent-log-pipelines/linux-log/file-tailing/file_tailing_test.go @@ -70,7 +70,7 @@ func (s *LinuxFakeintakeSuite) BeforeTest(suiteName, testName string) { }, 2*time.Minute, 10*time.Second) // Create a new log folder location - s.Env().RemoteHost.MustExecute(fmt.Sprintf("sudo mkdir -p %s", utils.LinuxLogsFolderPath)) + s.Env().RemoteHost.MustExecute("sudo mkdir -p " + utils.LinuxLogsFolderPath) } func (s *LinuxFakeintakeSuite) TearDownSuite() { @@ -106,7 +106,7 @@ func (s *LinuxFakeintakeSuite) TestLinuxLogTailing() { func (s *LinuxFakeintakeSuite) testLogCollection() { t := s.T() // Create a new log file with permissions accessible to the agent - s.Env().RemoteHost.MustExecute(fmt.Sprintf("sudo touch %s", logFilePath)) + s.Env().RemoteHost.MustExecute("sudo touch " + logFilePath) // Adjust permissions of new log file before log generation output, err := s.Env().RemoteHost.Execute(fmt.Sprintf("sudo chmod +r %s && echo true", logFilePath)) @@ -122,8 +122,8 @@ func (s *LinuxFakeintakeSuite) testLogCollection() { // Given expected tags expectedTags := []string{ - fmt.Sprintf("filename:%s", logFileName), - fmt.Sprintf("dirname:%s", utils.LinuxLogsFolderPath), + 
"filename:" + logFileName, + "dirname:" + utils.LinuxLogsFolderPath, } // Check intake for new logs utils.CheckLogsExpected(s.T(), s.Env().FakeIntake, "hello", "hello-world", expectedTags) diff --git a/test/new-e2e/tests/agent-log-pipelines/listener/listener_test.go b/test/new-e2e/tests/agent-log-pipelines/listener/listener_test.go index 726172dc634137..77038409135993 100644 --- a/test/new-e2e/tests/agent-log-pipelines/listener/listener_test.go +++ b/test/new-e2e/tests/agent-log-pipelines/listener/listener_test.go @@ -7,12 +7,12 @@ package listener import ( _ "embed" - "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-log-pipelines/utils" "strings" "testing" "time" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-log-pipelines/utils" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" @@ -102,7 +102,7 @@ func assertLogsReceived( require.NoError(t, err) ipAddress = strings.TrimSpace(ipAddress) t.Logf("Logger-app IP address: %s", ipAddress) - sourceHostTag := fmt.Sprintf("source_host:%s", ipAddress) + sourceHostTag := "source_host:" + ipAddress // Command to execute inside the container cmd := []string{ "/usr/local/bin/send-message.sh", diff --git a/test/new-e2e/tests/agent-log-pipelines/utils/file_tailing_utils.go b/test/new-e2e/tests/agent-log-pipelines/utils/file_tailing_utils.go index 96df39a304061d..b5e4575c192657 100644 --- a/test/new-e2e/tests/agent-log-pipelines/utils/file_tailing_utils.go +++ b/test/new-e2e/tests/agent-log-pipelines/utils/file_tailing_utils.go @@ -8,6 +8,7 @@ package utils import ( "encoding/json" + "errors" "fmt" "strings" "testing" @@ -69,7 +70,7 @@ func AppendLog(ls LogsTestSuite, logFileName, content string, recurrence int) { logPath = fmt.Sprintf("%s\\%s", WindowsLogsFolderPath, logFileName) t.Logf("Log path: %s", logPath) - checkCmd = fmt.Sprintf("type %s", logPath) + checkCmd = "type " + logPath assert.EventuallyWithT(t, func(c 
*assert.CollectT) { // AppendFile instead of echo since echo introduce encoding into the file. bytes, err := ls.Env().RemoteHost.AppendFile(osStr, logPath, []byte(logContent)) @@ -88,7 +89,7 @@ func AppendLog(ls LogsTestSuite, logFileName, content string, recurrence int) { t.Logf("Writing %d bytes to %s", bytes, logPath) } }, 1*time.Minute, 5*time.Second) - checkCmd = fmt.Sprintf("sudo cat %s", logPath) + checkCmd = "sudo cat " + logPath } assert.EventuallyWithT(t, func(c *assert.CollectT) { @@ -250,7 +251,7 @@ func FetchAndFilterLogs(fakeIntake *components.FakeIntake, service, content stri } if len(names) == 0 { - return nil, fmt.Errorf("the fake intake has no logs for any services") + return nil, errors.New("the fake intake has no logs for any services") } var contains bool @@ -324,7 +325,7 @@ func CleanUp(ls LogsTestSuite) { if ls.IsDevMode() { switch ls.Env().RemoteHost.OSFamily { default: // default is linux - ls.Env().RemoteHost.MustExecute(fmt.Sprintf("sudo rm -rf %s", LinuxLogsFolderPath)) + ls.Env().RemoteHost.MustExecute("sudo rm -rf " + LinuxLogsFolderPath) checkCmd = fmt.Sprintf("ls %s 2>/dev/null || echo 'Files do not exist'", LinuxLogsFolderPath) case os.WindowsFamily: if ls.IsDevMode() { diff --git a/test/new-e2e/tests/agent-log-pipelines/windows-log/file-tailing/file_tailing_test.go b/test/new-e2e/tests/agent-log-pipelines/windows-log/file-tailing/file_tailing_test.go index 725c5a650fef48..9fe29db7bd1b14 100644 --- a/test/new-e2e/tests/agent-log-pipelines/windows-log/file-tailing/file_tailing_test.go +++ b/test/new-e2e/tests/agent-log-pipelines/windows-log/file-tailing/file_tailing_test.go @@ -65,7 +65,7 @@ func (s *WindowsFakeintakeSuite) BeforeTest(suiteName, testName string) { } // If logs are found, print their content for debugging if !assert.Empty(c, logs, "Logs were found when none were expected") { - cat, _ := s.Env().RemoteHost.Execute(fmt.Sprintf("type %s", logFilePath)) + cat, _ := s.Env().RemoteHost.Execute("type " + logFilePath) 
s.T().Logf("Logs detected when none were expected: %v", cat) } }, 2*time.Minute, 10*time.Second) @@ -125,8 +125,8 @@ func (s *WindowsFakeintakeSuite) testLogCollection() { // Given expected tags expectedTags := []string{ - fmt.Sprintf("filename:%s", logFileName), - fmt.Sprintf("dirname:%s", utils.WindowsLogsFolderPath), + "filename:" + logFileName, + "dirname:" + utils.WindowsLogsFolderPath, } // Check intake for new logs utils.CheckLogsExpected(s.T(), s.Env().FakeIntake, "hello", "hello-world", expectedTags) diff --git a/test/new-e2e/tests/agent-platform/common/agent_behaviour.go b/test/new-e2e/tests/agent-platform/common/agent_behaviour.go index 2661cdfb90a96b..25319216b1508a 100644 --- a/test/new-e2e/tests/agent-platform/common/agent_behaviour.go +++ b/test/new-e2e/tests/agent-platform/common/agent_behaviour.go @@ -8,6 +8,7 @@ package common import ( "encoding/json" + "errors" "fmt" "regexp" "strings" @@ -273,7 +274,7 @@ func CheckApmEnabled(t *testing.T, client *TestClient) { if !assert.EventuallyWithT(tt, func(c *assert.CollectT) { boundPort, _ = AssertPortBoundByService(c, client, "tcp", 8126, "trace-agent", apmProcessName) }, 1*time.Minute, 500*time.Millisecond) { - err := fmt.Errorf("port tcp/8126 should be bound when APM is enabled") + err := errors.New("port tcp/8126 should be bound when APM is enabled") if client.Host.OSFamily == componentos.LinuxFamily { err = fmt.Errorf("%w\n%s", err, ReadJournalCtl(t, client, "trace-loader\\|trace-agent\\|datadog-agent-trace")) } @@ -393,7 +394,7 @@ func CheckSystemProbeBehavior(t *testing.T, client *TestClient) { for _, file := range files { file = strings.TrimSpace(file) - ddMetadata, err := client.Host.Execute(fmt.Sprintf("readelf -p dd_metadata %s", file)) + ddMetadata, err := client.Host.Execute("readelf -p dd_metadata " + file) require.NoError(tt, err, "readelf should not error, file is %s", file) require.Contains(tt, ddMetadata, archMetadata, "invalid arch metadata") } @@ -418,7 +419,7 @@ func CheckADPEnabled(t 
*testing.T, client *TestClient) { if !assert.EventuallyWithT(tt, func(c *assert.CollectT) { boundPort, _ = AssertPortBoundByService(c, client, "udp", 8125, "agent-data-plane", "agent-data-plane") }, 1*time.Minute, 500*time.Millisecond) { - err := fmt.Errorf("port udp/8125 should be bound when ADP is enabled") + err := errors.New("port udp/8125 should be bound when ADP is enabled") if client.Host.OSFamily == componentos.LinuxFamily { err = fmt.Errorf("%w\n%s", err, ReadJournalCtl(t, client, "agent-data-plane\\|datadog-agent-data-plane")) } diff --git a/test/new-e2e/tests/agent-platform/common/agent_install.go b/test/new-e2e/tests/agent-platform/common/agent_install.go index 3db31b294c5ff6..92656888eb307b 100644 --- a/test/new-e2e/tests/agent-platform/common/agent_install.go +++ b/test/new-e2e/tests/agent-platform/common/agent_install.go @@ -22,7 +22,7 @@ import ( // CheckInstallation run tests to check the installation of the agent func CheckInstallation(t *testing.T, client *TestClient) { t.Run("example config file", func(tt *testing.T) { - exampleFilePath := client.Helper.GetConfigFolder() + fmt.Sprintf("%s.example", client.Helper.GetConfigFileName()) + exampleFilePath := client.Helper.GetConfigFolder() + client.Helper.GetConfigFileName() + ".example" _, err := client.FileManager.FileExists(exampleFilePath) require.NoError(tt, err, "Example config file should be present") diff --git a/test/new-e2e/tests/agent-platform/common/bound-port/boundport.go b/test/new-e2e/tests/agent-platform/common/bound-port/boundport.go index abb164a288527e..2c8b2d953097e4 100644 --- a/test/new-e2e/tests/agent-platform/common/bound-port/boundport.go +++ b/test/new-e2e/tests/agent-platform/common/bound-port/boundport.go @@ -93,7 +93,7 @@ func parseHostPort(address string) (string, int, error) { localAddress := matches[hostPortRegexAddressIdx] localPort, err := strconv.Atoi(matches[hostPortRegexPortIdx]) if err != nil { - return "", 0, fmt.Errorf("invalid address: port is not a number") + 
return "", 0, errors.New("invalid address: port is not a number") } return localAddress, localPort, nil } diff --git a/test/new-e2e/tests/agent-platform/common/bound-port/unix.go b/test/new-e2e/tests/agent-platform/common/bound-port/unix.go index 633cc37622fdff..ce93ac34e5ca80 100644 --- a/test/new-e2e/tests/agent-platform/common/bound-port/unix.go +++ b/test/new-e2e/tests/agent-platform/common/bound-port/unix.go @@ -6,7 +6,7 @@ package boundport import ( - "fmt" + "errors" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" ) @@ -28,5 +28,5 @@ func boundPortsUnix(host *components.RemoteHost) ([]BoundPort, error) { return FromSs(out) } - return nil, fmt.Errorf("no ss found") + return nil, errors.New("no ss found") } diff --git a/test/new-e2e/tests/agent-platform/common/file-manager/unix.go b/test/new-e2e/tests/agent-platform/common/file-manager/unix.go index f1ab3c4ed7b9b1..32ce7583c28aa5 100644 --- a/test/new-e2e/tests/agent-platform/common/file-manager/unix.go +++ b/test/new-e2e/tests/agent-platform/common/file-manager/unix.go @@ -6,6 +6,7 @@ package filemanager import ( + "errors" "fmt" "io/fs" "strings" @@ -46,13 +47,13 @@ func (e *dummyentry) Name() string { return e.name } func (e *dummyentry) IsDir() bool { - panic(fmt.Errorf("not implemented")) + panic(errors.New("not implemented")) } func (e *dummyentry) Type() fs.FileMode { - panic(fmt.Errorf("not implemented")) + panic(errors.New("not implemented")) } func (e *dummyentry) Info() (fs.FileInfo, error) { - panic(fmt.Errorf("not implemented")) + panic(errors.New("not implemented")) } // ReadDir only returns the Name of files in path, not stat modes diff --git a/test/new-e2e/tests/agent-platform/common/pkg-manager/apt.go b/test/new-e2e/tests/agent-platform/common/pkg-manager/apt.go index e704ca455ebb9e..d0bec12f126423 100644 --- a/test/new-e2e/tests/agent-platform/common/pkg-manager/apt.go +++ b/test/new-e2e/tests/agent-platform/common/pkg-manager/apt.go @@ -6,8 +6,6 @@ package pkgmanager import ( 
- "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" ) @@ -25,5 +23,5 @@ func NewApt(host *components.RemoteHost) *Apt { // Remove call remove from apt func (s *Apt) Remove(pkg string) (string, error) { - return s.host.Execute(fmt.Sprintf("sudo apt remove -q -y %s", pkg)) + return s.host.Execute("sudo apt remove -q -y " + pkg) } diff --git a/test/new-e2e/tests/agent-platform/common/svc-manager/service.go b/test/new-e2e/tests/agent-platform/common/svc-manager/service.go index 1c95c9d45df2c9..3485d9bee48b94 100644 --- a/test/new-e2e/tests/agent-platform/common/svc-manager/service.go +++ b/test/new-e2e/tests/agent-platform/common/svc-manager/service.go @@ -32,7 +32,7 @@ func (s *Service) Status(service string) (string, error) { } // systemctl status returns 0 even if the service is not running - if strings.Contains(status, fmt.Sprintf("%s stop", service)) { + if strings.Contains(status, service+" stop") { return status, fmt.Errorf("service %s is not running", service) } return status, nil diff --git a/test/new-e2e/tests/agent-platform/common/svc-manager/upstart.go b/test/new-e2e/tests/agent-platform/common/svc-manager/upstart.go index 294c7434a659f9..ac7bb946e7ea78 100644 --- a/test/new-e2e/tests/agent-platform/common/svc-manager/upstart.go +++ b/test/new-e2e/tests/agent-platform/common/svc-manager/upstart.go @@ -31,7 +31,7 @@ func (s *Upstart) Status(service string) (string, error) { return status, err } // upstart status returns 0 even if the service is not running - if strings.Contains(status, fmt.Sprintf("%s stop", service)) { + if strings.Contains(status, service+" stop") { return status, fmt.Errorf("service %s is not running", service) } return status, nil diff --git a/test/new-e2e/tests/agent-platform/ddot/ddot_install_test.go b/test/new-e2e/tests/agent-platform/ddot/ddot_install_test.go index 3fae7cb2d1dbd1..f681d5ca6daa47 100644 --- a/test/new-e2e/tests/agent-platform/ddot/ddot_install_test.go +++ 
b/test/new-e2e/tests/agent-platform/ddot/ddot_install_test.go @@ -67,7 +67,7 @@ func TestDDOTInstallScript(t *testing.T) { vmOpts = append(vmOpts, ec2.WithInstanceType(instanceType)) } - t.Run(fmt.Sprintf("test ddot install on %s", osDesc.String()), func(tt *testing.T) { + t.Run("test ddot install on "+osDesc.String(), func(tt *testing.T) { tt.Parallel() tt.Logf("Testing %s", osDesc.Version) slice := strings.Split(osDesc.Version, "-") diff --git a/test/new-e2e/tests/agent-platform/install/install.go b/test/new-e2e/tests/agent-platform/install/install.go index bde9a544bc4a9c..f64f50cba0640d 100644 --- a/test/new-e2e/tests/agent-platform/install/install.go +++ b/test/new-e2e/tests/agent-platform/install/install.go @@ -30,7 +30,7 @@ func Unix(t *testing.T, client ExecutorWithRetry, options ...installparams.Optio testEnvVars := []string{} testEnvVars = append(testEnvVars, fmt.Sprintf("TESTING_APT_URL=s3.amazonaws.com/apttesting.datad0g.com/datadog-agent/pipeline-%v-a%v", params.PipelineID, params.MajorVersion)) if params.TestingKeysURL != "" { - testEnvVars = append(testEnvVars, fmt.Sprintf("TESTING_KEYS_URL=%s", params.TestingKeysURL)) + testEnvVars = append(testEnvVars, "TESTING_KEYS_URL="+params.TestingKeysURL) } // apt testing repo // TESTING_APT_REPO_VERSION="pipeline-xxxxx-ay y" @@ -41,7 +41,7 @@ func Unix(t *testing.T, client ExecutorWithRetry, options ...installparams.Optio testEnvVars = append(testEnvVars, fmt.Sprintf(`TESTING_YUM_VERSION_PATH="testing/pipeline-%v-a%v/%v"`, params.PipelineID, params.MajorVersion, params.MajorVersion)) commandLine = strings.Join(testEnvVars, " ") } else { - commandLine = fmt.Sprintf("DD_AGENT_MAJOR_VERSION=%s", params.MajorVersion) + commandLine = "DD_AGENT_MAJOR_VERSION=" + params.MajorVersion } if params.Flavor != "" { diff --git a/test/new-e2e/tests/agent-platform/package-signing/package_signing_test.go b/test/new-e2e/tests/agent-platform/package-signing/package_signing_test.go index 40cab7c5ae9ced..066adde48e52fe 100644 --- 
a/test/new-e2e/tests/agent-platform/package-signing/package_signing_test.go +++ b/test/new-e2e/tests/agent-platform/package-signing/package_signing_test.go @@ -70,7 +70,7 @@ func TestPackageSigningComponent(t *testing.T) { e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake( awshost.WithEC2InstanceOptions(ec2.WithOS(osDesc)), )), - e2e.WithStackName(fmt.Sprintf("pkgSigning-%s", osDesc.Flavor.String())), + e2e.WithStackName("pkgSigning-"+osDesc.Flavor.String()), ) }) } @@ -92,7 +92,7 @@ func (is *packageSigningTestSuite) TestPackageSigning() { keys := []string{"DATADOG_RPM_KEY_E09422B3.public", "DATADOG_RPM_KEY_CURRENT.public", "DATADOG_RPM_KEY_FD4BF915.public", "DATADOG_RPM_KEY_E09422B3.public"} for _, key := range keys { is.Env().RemoteHost.MustExecute(fmt.Sprintf("sudo curl --retry 5 -o \"/tmp/%s\" \"https://keys.datadoghq.com/%s\"", key, key)) - is.Env().RemoteHost.MustExecute(fmt.Sprintf("sudo rpm --import /tmp/%s", key)) + is.Env().RemoteHost.MustExecute("sudo rpm --import /tmp/" + key) } } diff --git a/test/new-e2e/tests/agent-platform/rpm/rpm_test.go b/test/new-e2e/tests/agent-platform/rpm/rpm_test.go index e491ebf7fe820b..ba819cbfa9cd36 100644 --- a/test/new-e2e/tests/agent-platform/rpm/rpm_test.go +++ b/test/new-e2e/tests/agent-platform/rpm/rpm_test.go @@ -57,7 +57,7 @@ func TestRpmScript(t *testing.T) { vmOpts = append(vmOpts, ec2.WithInstanceType(instanceType)) } - t.Run(fmt.Sprintf("test RPM package on %s", platforms.PrettifyOsDescriptor(osDesc)), func(tt *testing.T) { + t.Run("test RPM package on "+platforms.PrettifyOsDescriptor(osDesc), func(tt *testing.T) { tt.Parallel() tt.Logf("Testing %s", platforms.PrettifyOsDescriptor(osDesc)) slice := strings.Split(osDesc.Version, "-") diff --git a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go index a232abc46ee615..ce7557e5a19be9 100644 --- a/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go +++ 
b/test/new-e2e/tests/agent-platform/step-by-step/step_by_step_test.go @@ -84,7 +84,7 @@ func TestStepByStepScript(t *testing.T) { vmOpts = append(vmOpts, ec2.WithInstanceType(instanceType)) } - t.Run(fmt.Sprintf("test step by step on %s", platforms.PrettifyOsDescriptor(osDesc)), func(tt *testing.T) { + t.Run("test step by step on "+platforms.PrettifyOsDescriptor(osDesc), func(tt *testing.T) { tt.Parallel() tt.Logf("Testing %s", platforms.PrettifyOsDescriptor(osDesc)) slice := strings.Split(osDesc.Version, "-") diff --git a/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go b/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go index c683f8f1a698f8..1051b19a446f36 100644 --- a/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go +++ b/test/new-e2e/tests/agent-platform/upgrade/upgrade_test.go @@ -60,7 +60,7 @@ func TestUpgradeScript(t *testing.T) { for _, osDesc := range osDescriptors { osDesc := osDesc - t.Run(fmt.Sprintf("test upgrade on %s", platforms.PrettifyOsDescriptor(osDesc)), func(tt *testing.T) { + t.Run("test upgrade on "+platforms.PrettifyOsDescriptor(osDesc), func(tt *testing.T) { tt.Parallel() tt.Logf("Testing %s", platforms.PrettifyOsDescriptor(osDesc)) diff --git a/test/new-e2e/tests/agent-runtimes/forwarder/nss_failover_test.go b/test/new-e2e/tests/agent-runtimes/forwarder/nss_failover_test.go index 7d7de0cb4aef44..55a41d8aea6d99 100644 --- a/test/new-e2e/tests/agent-runtimes/forwarder/nss_failover_test.go +++ b/test/new-e2e/tests/agent-runtimes/forwarder/nss_failover_test.go @@ -256,7 +256,7 @@ func (v *multiFakeIntakeSuite) requireIntakeIsUsed(intake *fi.Client, intakeMaxW assert.NotEmpty(t, metricNames) // check logs - v.Env().Host.MustExecute(fmt.Sprintf("echo 'totoro' >> %s", logFile)) + v.Env().Host.MustExecute("echo 'totoro' >> " + logFile) logs, err := intake.FilterLogs(logService) require.NoError(t, err) assert.NotEmpty(t, logs) @@ -289,7 +289,7 @@ func (v *multiFakeIntakeSuite) requireIntakeNotUsed(intake *fi.Client, 
intakeMax intake.FlushServerAndResetAggregators() // write a log - v.Env().Host.MustExecute(fmt.Sprintf("echo 'totoro' >> %s", logFile)) + v.Env().Host.MustExecute("echo 'totoro' >> " + logFile) // send a flare v.Env().Agent.Client.Flare(agentclient.WithArgs([]string{"--email", "e2e@test.com", "--send"})) diff --git a/test/new-e2e/tests/agent-subcommands/check/check_common_test.go b/test/new-e2e/tests/agent-subcommands/check/check_common_test.go index 2658aeff618530..d7ae5a6bb5db78 100644 --- a/test/new-e2e/tests/agent-subcommands/check/check_common_test.go +++ b/test/new-e2e/tests/agent-subcommands/check/check_common_test.go @@ -8,7 +8,7 @@ package check import ( _ "embed" - "fmt" + "strconv" "github.com/stretchr/testify/assert" @@ -64,7 +64,7 @@ func (v *baseCheckSuite) TestCheckRate() { func (v *baseCheckSuite) TestCheckTimes() { times := 10 - check := v.Env().Agent.Client.Check(agentclient.WithArgs([]string{"hello", "--check-times", fmt.Sprint(times), "--json"})) + check := v.Env().Agent.Client.Check(agentclient.WithArgs([]string{"hello", "--check-times", strconv.Itoa(times), "--json"})) data := checkutils.ParseJSONOutput(v.T(), []byte(check)) diff --git a/test/new-e2e/tests/agent-subcommands/dogstatsdreplay/dogstatsdreplay_common_test.go b/test/new-e2e/tests/agent-subcommands/dogstatsdreplay/dogstatsdreplay_common_test.go index d6e8871dd4355d..87dc6f5f11fca3 100644 --- a/test/new-e2e/tests/agent-subcommands/dogstatsdreplay/dogstatsdreplay_common_test.go +++ b/test/new-e2e/tests/agent-subcommands/dogstatsdreplay/dogstatsdreplay_common_test.go @@ -40,7 +40,7 @@ func (v *baseDogstatsdReplaySuite) TestReplayWithTagEnrichment() { v.uploadCaptureFile(metricsWithTagsCapture, captureFile) output := v.Env().RemoteHost.MustExecute( - fmt.Sprintf("sudo datadog-agent dogstatsd-replay -f %s", captureFile)) + "sudo datadog-agent dogstatsd-replay -f " + captureFile) assert.Contains(v.T(), output, "replay done") assert.NotContains(v.T(), output, "Unable to load state API 
error") diff --git a/test/new-e2e/tests/apm/tests.go b/test/new-e2e/tests/apm/tests.go index a5ae783482840d..03865ae5464a30 100644 --- a/test/new-e2e/tests/apm/tests.go +++ b/test/new-e2e/tests/apm/tests.go @@ -6,7 +6,6 @@ package apm import ( - "fmt" "slices" "strings" "testing" @@ -88,7 +87,7 @@ func testTracesHaveContainerTag(t *testing.T, c *assert.CollectT, service string assert.NoError(c, err) assert.NotEmpty(c, traces) t.Logf("Got %d apm traces", len(traces)) - assert.True(c, hasContainerTag(traces, fmt.Sprintf("container_name:%s", service)), "got traces: %v", traces) + assert.True(c, hasContainerTag(traces, "container_name:"+service), "got traces: %v", traces) } func testProcessTraces(c *assert.CollectT, intake *components.FakeIntake, processTags string) { @@ -131,7 +130,7 @@ func testStatsHaveContainerTags(t *testing.T, c *assert.CollectT, service string if ss.Service == service { assert.NotEmpty(c, s.ContainerID, "ContainerID should not be empty. Got Stats: %v", stats) assert.NotEmpty(c, s.Tags, "Container Tags should not be empty. 
Got Stats: %v", stats) - assert.Contains(c, s.Tags, fmt.Sprintf("container_name:%s", service)) + assert.Contains(c, s.Tags, "container_name:"+service) } } } diff --git a/test/new-e2e/tests/containers/base_test.go b/test/new-e2e/tests/containers/base_test.go index bea555700465a2..7d7f337a99f88a 100644 --- a/test/new-e2e/tests/containers/base_test.go +++ b/test/new-e2e/tests/containers/base_test.go @@ -105,7 +105,7 @@ func (suite *baseSuite[Env]) testMetric(args *testMetricArgs) { }) if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ - Title: pointer.Ptr(fmt.Sprintf("testMetric %s", prettyMetricQuery)), + Title: pointer.Ptr("testMetric " + prettyMetricQuery), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -234,7 +234,7 @@ func (suite *baseSuite[Env]) testLog(args *testLogArgs) { }) if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ - Title: pointer.Ptr(fmt.Sprintf("testLog %s", prettyLogQuery)), + Title: pointer.Ptr("testLog " + prettyLogQuery), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -363,7 +363,7 @@ func (suite *baseSuite[Env]) testCheckRun(args *testCheckRunArgs) { }) if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ - Title: pointer.Ptr(fmt.Sprintf("testCheckRun %s", prettyCheckRunQuery)), + Title: pointer.Ptr("testCheckRun " + prettyCheckRunQuery), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result @@ -476,7 +476,7 @@ func (suite *baseSuite[Env]) testEvent(args *testEventArgs) { }) if _, err := suite.DatadogClient().PostEvent(&datadog.Event{ - Title: pointer.Ptr(fmt.Sprintf("testEvent %s", prettyEventQuery)), + Title: pointer.Ptr("testEvent " + prettyEventQuery), Text: pointer.Ptr(fmt.Sprintf(`%%%%%% ### Result diff --git a/test/new-e2e/tests/cws/common.go b/test/new-e2e/tests/cws/common.go index 08e583068ab6f9..8adea381c99d35 100644 --- a/test/new-e2e/tests/cws/common.go +++ b/test/new-e2e/tests/cws/common.go @@ -98,16 +98,16 @@ func (a *agentSuite) Test03OpenSignal() { assert.NoErrorf(a.T(), err, "failed to delete 
agent rule %s", agentRuleID) } if dirname != "" { - a.Env().RemoteHost.MustExecute(fmt.Sprintf("rm -r %s", dirname)) + a.Env().RemoteHost.MustExecute("rm -r " + dirname) } }() // Create temporary directory tempDir := a.Env().RemoteHost.MustExecute("mktemp -d") dirname = strings.TrimSuffix(tempDir, "\n") - filepath := fmt.Sprintf("%s/secret", dirname) - desc := fmt.Sprintf("e2e test rule %s", a.testID) - agentRuleName := fmt.Sprintf("new_e2e_agent_rule_%s", a.testID) + filepath := dirname + "/secret" + desc := "e2e test rule " + a.testID + agentRuleName := "new_e2e_agent_rule_" + a.testID // Create CWS Agent rule rule := fmt.Sprintf("open.file.path == \"%s\"", filepath) @@ -142,7 +142,7 @@ func (a *agentSuite) Test03OpenSignal() { // Push policies a.Env().RemoteHost.MustExecute(fmt.Sprintf("sudo cp temp.txt %s && rm temp.txt", policiesPath)) - policiesFile := a.Env().RemoteHost.MustExecute(fmt.Sprintf("cat %s", policiesPath)) + policiesFile := a.Env().RemoteHost.MustExecute("cat " + policiesPath) require.Contains(a.T(), policiesFile, desc, "The policies file should contain the created rule") // Reload policies @@ -162,7 +162,7 @@ func (a *agentSuite) Test03OpenSignal() { // Check app event assert.EventuallyWithT(a.T(), func(c *assert.CollectT) { // Trigger agent event - a.Env().RemoteHost.MustExecute(fmt.Sprintf("touch %s", filepath)) + a.Env().RemoteHost.MustExecute("touch " + filepath) testRuleEvent(c, a, agentRuleName, func(e *api.RuleEvent) { assert.Equal(c, "open", e.Evt.Name, "event name should be open") assert.Equal(c, filepath, e.File.Path, "file path does not match") @@ -180,7 +180,7 @@ func (a *agentSuite) Test03OpenSignal() { if !assert.NotNil(c, signal) { return } - assert.Contains(c, signal.Tags, fmt.Sprintf("rule_id:%s", strings.ToLower(agentRuleName)), "unable to find rule_id tag") + assert.Contains(c, signal.Tags, "rule_id:"+strings.ToLower(agentRuleName), "unable to find rule_id tag") if !assert.Contains(c, signal.AdditionalProperties, "attributes", 
"unable to find 'attributes' field in signal") { return } @@ -323,7 +323,7 @@ func testCwsEnabled(t assert.TestingT, ts testSuite) { } func testSelftestsEvent(t assert.TestingT, ts testSuite, extraValidations ...eventValidationCb[*api.SelftestsEvent]) { - query := fmt.Sprintf("rule_id:self_test host:%s", ts.Hostname()) + query := "rule_id:self_test host:" + ts.Hostname() selftestsEvent, err := api.GetAppEvent[api.SelftestsEvent](ts.Client(), query) if !assert.NoErrorf(t, err, "could not get selftests event for host %s", ts.Hostname()) { return diff --git a/test/new-e2e/tests/cws/fargate_test.go b/test/new-e2e/tests/cws/fargate_test.go index 1a0008969c1c92..34634bef34b46f 100644 --- a/test/new-e2e/tests/cws/fargate_test.go +++ b/test/new-e2e/tests/cws/fargate_test.go @@ -157,9 +157,9 @@ func TestECSFargate(t *testing.T) { "trace", "selftests", "--exec", - fmt.Sprintf("--exec.path=%s", execFilePath), + "--exec.path=" + execFilePath, "--open", - fmt.Sprintf("--open.path=%s", openFilePath), + "--open.path=" + openFilePath, }), DependsOn: ecsx.TaskDefinitionContainerDependencyArray{ ecsx.TaskDefinitionContainerDependencyArgs{ diff --git a/test/new-e2e/tests/cws/windows_test.go b/test/new-e2e/tests/cws/windows_test.go index 2d436e65398ffe..6acd816965aab3 100644 --- a/test/new-e2e/tests/cws/windows_test.go +++ b/test/new-e2e/tests/cws/windows_test.go @@ -119,7 +119,7 @@ func (a *agentSuiteWindows) Test03CreateFileSignal() { assert.NoErrorf(a.T(), err, "failed to delete agent rule %s", agentRuleID) } if dirname != "" { - a.Env().RemoteHost.MustExecute(fmt.Sprintf("rm -r %s", dirname)) + a.Env().RemoteHost.MustExecute("rm -r " + dirname) } }() @@ -127,9 +127,9 @@ func (a *agentSuiteWindows) Test03CreateFileSignal() { cmd := "New-Item -ItemType Directory -Path $env:TEMP -Name ([Guid]::NewGuid().Guid) | Select-Object -ExpandProperty FullName" tempDir := a.Env().RemoteHost.MustExecute(cmd) dirname = strings.TrimSpace(tempDir) - filepath := fmt.Sprintf("%s\\secret", dirname) - 
desc := fmt.Sprintf("e2e test rule %s", a.testID) - agentRuleName := fmt.Sprintf("new_e2e_agent_rule_%s", a.testID) + filepath := dirname + "\\secret" + desc := "e2e test rule " + a.testID + agentRuleName := "new_e2e_agent_rule_" + a.testID // Create CWS Agent rule rule := fmt.Sprintf(`create.file.path == "%s"`, filepath) @@ -167,7 +167,7 @@ func (a *agentSuiteWindows) Test03CreateFileSignal() { // Push policies a.Env().RemoteHost.MustExecute(fmt.Sprintf("cp temp.txt '%s'; rm temp.txt", policiesPathWindows)) - policiesFile := a.Env().RemoteHost.MustExecute(fmt.Sprintf("cat %s", policiesPathWindows)) + policiesFile := a.Env().RemoteHost.MustExecute("cat " + policiesPathWindows) require.Contains(a.T(), policiesFile, desc, "The policies file should contain the created rule") // Reload policies @@ -206,7 +206,7 @@ func (a *agentSuiteWindows) Test03CreateFileSignal() { if !assert.NotNil(c, signal) { return } - assert.Contains(c, signal.Tags, fmt.Sprintf("rule_id:%s", strings.ToLower(agentRuleName)), "unable to find rule_id tag") + assert.Contains(c, signal.Tags, "rule_id:"+strings.ToLower(agentRuleName), "unable to find rule_id tag") if !assert.Contains(c, signal.AdditionalProperties, "attributes", "unable to find 'attributes' field in signal") { return } diff --git a/test/new-e2e/tests/fips-compliance/cluster_agent_fips_test.go b/test/new-e2e/tests/fips-compliance/cluster_agent_fips_test.go index b36fc6b5c3f961..d86adcead6054c 100644 --- a/test/new-e2e/tests/fips-compliance/cluster_agent_fips_test.go +++ b/test/new-e2e/tests/fips-compliance/cluster_agent_fips_test.go @@ -123,13 +123,13 @@ func (s *fipsServerClusterAgentSuite) startFIPSServerWithClusterAgentImage(tc ci "CLUSTER_AGENT_IMAGE": s.clusterAgentImage, } if tc.cipher != "" { - envVars["CIPHER"] = fmt.Sprintf("-c %s", tc.cipher) + envVars["CIPHER"] = "-c " + tc.cipher } if tc.tlsMax != "" { - envVars["TLS_MAX"] = fmt.Sprintf("--tls-max %s", tc.tlsMax) + envVars["TLS_MAX"] = "--tls-max " + tc.tlsMax } if 
tc.tlsMin != "" { - envVars["TLS_MIN"] = fmt.Sprintf("--tls-min %s", tc.tlsMin) + envVars["TLS_MIN"] = "--tls-min " + tc.tlsMin } cmd := fmt.Sprintf("docker-compose -f %s up --detach --wait --timeout 300", strings.TrimSpace(s.fipsServer.composeFiles)) @@ -177,7 +177,7 @@ func (s *fipsServerClusterAgentSuite) TestFIPSCiphers() { serverLogs := s.fipsServer.Logs() if tc.want { - assert.Contains(s.T(), serverLogs, fmt.Sprintf("Negotiated cipher suite: %s", tc.cipher)) + assert.Contains(s.T(), serverLogs, "Negotiated cipher suite: "+tc.cipher) } else { assert.Contains(s.T(), serverLogs, "no cipher suite supported by both client and server") } diff --git a/test/new-e2e/tests/fips-compliance/common.go b/test/new-e2e/tests/fips-compliance/common.go index a8b52ca4f3272e..d872831efc546a 100644 --- a/test/new-e2e/tests/fips-compliance/common.go +++ b/test/new-e2e/tests/fips-compliance/common.go @@ -53,13 +53,13 @@ func (s *fipsServer) Start(t *testing.T, tc cipherTestCase) { "CERT": tc.cert, } if tc.cipher != "" { - envVars["CIPHER"] = fmt.Sprintf("-c %s", tc.cipher) + envVars["CIPHER"] = "-c " + tc.cipher } if tc.tlsMax != "" { - envVars["TLS_MAX"] = fmt.Sprintf("--tls-max %s", tc.tlsMax) + envVars["TLS_MAX"] = "--tls-max " + tc.tlsMax } if tc.tlsMin != "" { - envVars["TLS_MIN"] = fmt.Sprintf("--tls-min %s", tc.tlsMin) + envVars["TLS_MIN"] = "--tls-min " + tc.tlsMin } cmd := fmt.Sprintf("docker-compose -f %s up --detach --wait --timeout 300", strings.TrimSpace(s.composeFiles)) @@ -136,7 +136,7 @@ func (s *fipsServerSuite[Env]) TestFIPSCiphers() { serverLogs := s.fipsServer.Logs() if tc.want { - assert.Contains(s.T(), serverLogs, fmt.Sprintf("Negotiated cipher suite: %s", tc.cipher)) + assert.Contains(s.T(), serverLogs, "Negotiated cipher suite: "+tc.cipher) } else { assert.Contains(s.T(), serverLogs, "no cipher suite supported by both client and server") } diff --git a/test/new-e2e/tests/fleet/agent/agent.go b/test/new-e2e/tests/fleet/agent/agent.go index 
0ed3f12038588c..27eef8aed5cf70 100644 --- a/test/new-e2e/tests/fleet/agent/agent.go +++ b/test/new-e2e/tests/fleet/agent/agent.go @@ -105,7 +105,7 @@ func (a *Agent) runCommand(command string, args ...string) (string, error) { } err := retry.Do(func() error { - _, err := a.host.RemoteHost.Execute(fmt.Sprintf("%s config --all", baseCommand)) + _, err := a.host.RemoteHost.Execute(baseCommand + " config --all") return err }, retry.Attempts(10), retry.Delay(1*time.Second), retry.DelayType(retry.FixedDelay)) if err != nil { diff --git a/test/new-e2e/tests/fleet/agent/install.go b/test/new-e2e/tests/fleet/agent/install.go index 6df0e9131307c5..163f8e0c7452fe 100644 --- a/test/new-e2e/tests/fleet/agent/install.go +++ b/test/new-e2e/tests/fleet/agent/install.go @@ -124,7 +124,7 @@ func (a *Agent) installWindowsInstallScript(params *installParams) error { } env["DD_SITE"] = "datad0g.com" env["DD_INSTALLER_URL"] = artifactURL - env["DD_INSTALLER_DEFAULT_PKG_VERSION_DATADOG_AGENT"] = fmt.Sprintf("pipeline-%s", os.Getenv("E2E_PIPELINE_ID")) + env["DD_INSTALLER_DEFAULT_PKG_VERSION_DATADOG_AGENT"] = "pipeline-" + os.Getenv("E2E_PIPELINE_ID") env["DD_INSTALLER_REGISTRY_URL_AGENT_PACKAGE"] = "installtesting.datad0g.com.internal.dda-testing.com" scriptURL = fmt.Sprintf("https://installtesting.datad0g.com/pipeline-%s/scripts/Install-Datadog.ps1", os.Getenv("E2E_PIPELINE_ID")) } diff --git a/test/new-e2e/tests/fleet/backend/backend.go b/test/new-e2e/tests/fleet/backend/backend.go index ee158d7e145ba4..8e52a4acb84173 100644 --- a/test/new-e2e/tests/fleet/backend/backend.go +++ b/test/new-e2e/tests/fleet/backend/backend.go @@ -8,6 +8,7 @@ package backend import ( "encoding/json" + "errors" "fmt" "os" "sort" @@ -266,7 +267,7 @@ func (b *Backend) Catalog() *Catalog { func (b *Backend) getCatalog() (*Catalog, error) { var catalog Catalog - urls := []string{fmt.Sprintf("installtesting.datad0g.com/agent-package:pipeline-%s", os.Getenv("E2E_PIPELINE_ID"))} + urls := 
[]string{"installtesting.datad0g.com/agent-package:pipeline-" + os.Getenv("E2E_PIPELINE_ID")} var prodTags []string err := retry.Do(func() error { var err error @@ -277,7 +278,7 @@ func (b *Backend) getCatalog() (*Catalog, error) { return nil, err } for _, tag := range prodTags { - urls = append(urls, fmt.Sprintf("install.datadoghq.com/agent-package:%s", tag)) + urls = append(urls, "install.datadoghq.com/agent-package:"+tag) } for _, url := range urls { var version string @@ -303,7 +304,7 @@ func (b *Backend) getCatalog() (*Catalog, error) { catalog.packages = append(catalog.packages, catalogEntry{ Package: "datadog-agent", Version: version, - URL: fmt.Sprintf("oci://%s", url), + URL: "oci://" + url, branch: branch, }) } @@ -351,7 +352,7 @@ func (b *Backend) runDaemonCommand(command string, args ...string) (string, erro case e2eos.LinuxFamily: sanitizeCharacter = `\"` baseCommand = "sudo datadog-installer daemon" - _, err := b.host.RemoteHost.Execute(fmt.Sprintf("%s --help", baseCommand)) + _, err := b.host.RemoteHost.Execute(baseCommand + " --help") if err != nil { if !strings.Contains(err.Error(), "unknown command") { return "", err @@ -366,7 +367,7 @@ func (b *Backend) runDaemonCommand(command string, args ...string) (string, erro } err := retry.Do(func() error { - _, err := b.host.RemoteHost.Execute(fmt.Sprintf("%s rc-status", baseCommand)) + _, err := b.host.RemoteHost.Execute(baseCommand + " rc-status") return err }) if err != nil { @@ -413,7 +414,7 @@ func (b *Backend) getDaemonPID() (int, error) { return 0, err } if pid == "0" { - return 0, fmt.Errorf("daemon PID is 0") + return 0, errors.New("daemon PID is 0") } return strconv.Atoi(pid) } diff --git a/test/new-e2e/tests/gpu/capabilities.go b/test/new-e2e/tests/gpu/capabilities.go index f9ab7ce9ec686d..9ee3d901ea4ec7 100644 --- a/test/new-e2e/tests/gpu/capabilities.go +++ b/test/new-e2e/tests/gpu/capabilities.go @@ -93,7 +93,7 @@ func (c *hostCapabilities) QuerySysprobe(path string) (string, error) { } func 
(c *hostCapabilities) removeContainer(containerName string) error { - _, err := c.suite.Env().RemoteHost.Execute(fmt.Sprintf("docker rm -f %s", containerName)) + _, err := c.suite.Env().RemoteHost.Execute("docker rm -f " + containerName) return err } @@ -132,7 +132,7 @@ func (c *hostCapabilities) RunContainerWorkloadWithGPUs(image string, arguments // Cleanup the container _ = c.removeContainer(containerName) }) - containerIDCmd := fmt.Sprintf("docker inspect -f {{.Id}} %s", containerName) + containerIDCmd := "docker inspect -f {{.Id}} " + containerName idOut, err := c.suite.Env().RemoteHost.Execute(containerIDCmd) if err != nil { return "", err @@ -145,7 +145,7 @@ func (c *hostCapabilities) RunContainerWorkloadWithGPUs(image string, arguments func (c *hostCapabilities) GetRestartCount(component agentComponent) int { service := agentComponentToSystemdService[component] - out, err := c.suite.Env().RemoteHost.Execute(fmt.Sprintf("systemctl show -p NRestarts %s", service)) + out, err := c.suite.Env().RemoteHost.Execute("systemctl show -p NRestarts " + service) c.suite.Require().NoError(err) c.suite.Require().NotEmpty(out) @@ -165,7 +165,7 @@ func (c *hostCapabilities) CheckWorkloadErrors(containerID string) error { } // Check container exit code using docker inspect - exitCodeCmd := fmt.Sprintf("docker inspect -f '{{.State.ExitCode}}' %s", containerName) + exitCodeCmd := "docker inspect -f '{{.State.ExitCode}}' " + containerName exitCodeOut, err := c.suite.Env().RemoteHost.Execute(exitCodeCmd) if err != nil { return fmt.Errorf("error inspecting container %s: %w", containerName, err) @@ -179,7 +179,7 @@ func (c *hostCapabilities) CheckWorkloadErrors(containerID string) error { if exitCode != 0 { // Get container status for more details - statusCmd := fmt.Sprintf("docker inspect -f '{{.State.Status}}' %s", containerName) + statusCmd := "docker inspect -f '{{.State.Status}}' " + containerName statusOut, _ := c.suite.Env().RemoteHost.Execute(statusCmd) status := 
strings.TrimSpace(statusOut) diff --git a/test/new-e2e/tests/gpu/gpu_test.go b/test/new-e2e/tests/gpu/gpu_test.go index 3688fe3a78c2a7..3a0bb6696ad3cd 100644 --- a/test/new-e2e/tests/gpu/gpu_test.go +++ b/test/new-e2e/tests/gpu/gpu_test.go @@ -98,7 +98,7 @@ func dockerImageName() string { func mandatoryMetricTagRegexes() []*regexp.Regexp { regexes := make([]*regexp.Regexp, 0, len(mandatoryMetricTags)) for _, tag := range mandatoryMetricTags { - regexes = append(regexes, regexp.MustCompile(fmt.Sprintf("%s:.*", tag))) + regexes = append(regexes, regexp.MustCompile(tag+":.*")) } return regexes diff --git a/test/new-e2e/tests/gpu/provisioner.go b/test/new-e2e/tests/gpu/provisioner.go index 95ce297119abe8..35851cd65a84a3 100644 --- a/test/new-e2e/tests/gpu/provisioner.go +++ b/test/new-e2e/tests/gpu/provisioner.go @@ -365,7 +365,7 @@ func downloadDockerImages(e *aws.Environment, vm *componentsremote.Host, images var cmds []pulumi.Resource for i, image := range images { - pullCmd := makeRetryCommand(fmt.Sprintf("docker pull %s", image), dockerPullMaxRetries) + pullCmd := makeRetryCommand("docker pull "+image, dockerPullMaxRetries) cmd, err := vm.OS.Runner().Command( e.CommonNamer().ResourceName("docker-pull", strconv.Itoa(i)), &command.Args{ @@ -401,7 +401,7 @@ func downloadContainerdImagesInKindNodes(e *aws.Environment, vm *componentsremot var cmds []pulumi.Resource for i, image := range images { - pullCmd := makeRetryCommand(fmt.Sprintf("crictl pull %s", image), dockerPullMaxRetries) + pullCmd := makeRetryCommand("crictl pull "+image, dockerPullMaxRetries) cmd, err := vm.OS.Runner().Command( e.CommonNamer().ResourceName("kind-node-pull", fmt.Sprintf("image-%d", i)), &command.Args{ diff --git a/test/new-e2e/tests/installer/host/fixtures.go b/test/new-e2e/tests/installer/host/fixtures.go index 3d2c37fa77a2e3..dfc138be25b4d1 100644 --- a/test/new-e2e/tests/installer/host/fixtures.go +++ b/test/new-e2e/tests/installer/host/fixtures.go @@ -38,7 +38,7 @@ func (h *Host) 
uploadFixtures() { for _, fixture := range fixtures { if filepath.Ext(fixture.Name()) == ".sh" { fixturePath := filepath.Join("/opt/fixtures", fixture.Name()) - h.remote.MustExecute(fmt.Sprintf("chmod +x %s", fixturePath)) + h.remote.MustExecute("chmod +x " + fixturePath) } } diff --git a/test/new-e2e/tests/installer/host/host.go b/test/new-e2e/tests/installer/host/host.go index ab26bfee4476f1..eee9d897fd7c90 100644 --- a/test/new-e2e/tests/installer/host/host.go +++ b/test/new-e2e/tests/installer/host/host.go @@ -138,13 +138,13 @@ func (h *Host) Run(command string, env ...string) string { // UserExists checks if a user exists on the host. func (h *Host) UserExists(username string) bool { - _, err := h.remote.Execute(fmt.Sprintf("id -u %s", username)) + _, err := h.remote.Execute("id -u " + username) return err == nil } // GroupExists checks if a group exists on the host. func (h *Host) GroupExists(groupname string) bool { - _, err := h.remote.Execute(fmt.Sprintf("id -g %s", groupname)) + _, err := h.remote.Execute("id -g " + groupname) return err == nil } @@ -166,15 +166,15 @@ func (h *Host) WriteFile(path string, content []byte) error { // DeletePath deletes a path on the host. func (h *Host) DeletePath(path string) { - h.remote.MustExecute(fmt.Sprintf("sudo ls %s", path)) - h.remote.MustExecute(fmt.Sprintf("sudo rm -rf %s", path)) + h.remote.MustExecute("sudo ls " + path) + h.remote.MustExecute("sudo rm -rf " + path) } // WaitForUnitActive waits for a systemd unit to be active func (h *Host) WaitForUnitActive(t *testing.T, units ...string) { for _, unit := range units { assert.Eventually(t, func() bool { - _, err := h.remote.Execute(fmt.Sprintf("systemctl is-active --quiet %s", unit)) + _, err := h.remote.Execute("systemctl is-active --quiet " + unit) return err == nil }, time.Second*90, time.Second*2, "unit %s did not become active. 
logs: %s", unit, h.remote.MustExecute("sudo journalctl -xeu "+unit)) @@ -395,10 +395,12 @@ func (h *Host) fs() map[string]FileInfo { "/run/utmp", "/tmp", } - cmd := "sudo find / " + var cmdBuilder strings.Builder + cmdBuilder.WriteString("sudo find / ") for _, dir := range ignoreDirs { - cmd += fmt.Sprintf("-path '%s' -prune -o ", dir) + fmt.Fprintf(&cmdBuilder, "-path '%s' -prune -o ", dir) } + cmd := cmdBuilder.String() cmd += `-printf '%p\\|//%s\\|//%TY-%Tm-%Td %TH:%TM:%TS\\|//%f\\|//%m\\|//%u\\|//%g\\|//%y\\|//%l\n' 2>/dev/null` output := h.remote.MustExecute(cmd + " || true") lines := strings.Split(output, "\n") @@ -500,7 +502,7 @@ func (h *Host) SetUmask(mask string) (oldmask string) { } else { h.remote.MustExecute(fmt.Sprintf("sed -i -E 's/umask %s/umask %s/g' ~/.bashrc", oldmask, mask)) } - h.remote.MustExecute(fmt.Sprintf("umask | grep -q %s", mask)) // Correctness check + h.remote.MustExecute("umask | grep -q " + mask) // Correctness check return oldmask } diff --git a/test/new-e2e/tests/installer/host/systemd.go b/test/new-e2e/tests/installer/host/systemd.go index 09083e9e1d1bcd..4f9ff94ec4f3d9 100644 --- a/test/new-e2e/tests/installer/host/systemd.go +++ b/test/new-e2e/tests/installer/host/systemd.go @@ -39,7 +39,7 @@ func (h *Host) LastJournaldTimestamp() JournaldTimestamp { func (h *Host) AssertUnitProperty(unit, property, value string) { res, err := h.remote.Execute(fmt.Sprintf("sudo systemctl show -p %s %s", property, unit)) require.NoError(h.t(), err) - require.Equal(h.t(), fmt.Sprintf("%s=%s\n", property, value), res, "unit %s: %s != %s.\nUnit:\n%s", unit, fmt.Sprintf("%s=%s\n", property, value), res, h.remote.MustExecute(fmt.Sprintf("sudo systemctl cat %s", unit))) + require.Equal(h.t(), fmt.Sprintf("%s=%s\n", property, value), res, "unit %s: %s != %s.\nUnit:\n%s", unit, fmt.Sprintf("%s=%s\n", property, value), res, h.remote.MustExecute("sudo systemctl cat "+unit)) } func popIfMatches(searchedEvents []SystemdEvent, log journaldLog) []SystemdEvent 
{ @@ -123,7 +123,7 @@ func (h *Host) AssertSystemdEvents(since JournaldTimestamp, events SystemdEventS } for unit := range units { - h.t().Logf("--- Logs for unit %s:\n%s", unit, h.remote.MustExecute(fmt.Sprintf("sudo journalctl -xeu %s", unit))) + h.t().Logf("--- Logs for unit %s:\n%s", unit, h.remote.MustExecute("sudo journalctl -xeu "+unit)) } } } diff --git a/test/new-e2e/tests/installer/script/all_scripts_test.go b/test/new-e2e/tests/installer/script/all_scripts_test.go index 2b9ab86497f192..5d536eded4c99b 100644 --- a/test/new-e2e/tests/installer/script/all_scripts_test.go +++ b/test/new-e2e/tests/installer/script/all_scripts_test.go @@ -176,8 +176,8 @@ func (s *installerScriptBaseSuite) RunInstallScriptWithError(url string, params time.Sleep(1 * time.Second) } - scriptParams := append(params, fmt.Sprintf("DD_API_KEY=%s", installer.GetAPIKey()), "DD_INSTALLER_REGISTRY_URL_INSTALLER_PACKAGE=installtesting.datad0g.com.internal.dda-testing.com") - _, err = s.Env().RemoteHost.Execute(fmt.Sprintf("%s bash install_script", strings.Join(scriptParams, " "))) + scriptParams := append(params, "DD_API_KEY="+installer.GetAPIKey(), "DD_INSTALLER_REGISTRY_URL_INSTALLER_PACKAGE=installtesting.datad0g.com.internal.dda-testing.com") + _, err = s.Env().RemoteHost.Execute(strings.Join(scriptParams, " ") + " bash install_script") return err } diff --git a/test/new-e2e/tests/installer/script/databricks_test.go b/test/new-e2e/tests/installer/script/databricks_test.go index 057dc8b830e02f..a01a24b33c0ae5 100644 --- a/test/new-e2e/tests/installer/script/databricks_test.go +++ b/test/new-e2e/tests/installer/script/databricks_test.go @@ -6,8 +6,6 @@ package installscript import ( - "fmt" - e2eos "github.com/DataDog/datadog-agent/test/e2e-framework/components/os" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host" @@ -36,7 +34,7 @@ func testDatabricksScript(os e2eos.Descriptor, arch e2eos.Architecture) installe func (s *installScriptDatabricksSuite) 
TestDatabricksWorkerInstallScript() { s.RunInstallScript(s.url) state := s.host.State() - agentPath := fmt.Sprintf("/opt/datadog-packages/datadog-agent/%s", databricksAgentVersion) + agentPath := "/opt/datadog-packages/datadog-agent/" + databricksAgentVersion state.AssertDirExists(agentPath, 0755, "dd-agent", "dd-agent") state.AssertSymlinkExists("/opt/datadog-packages/datadog-agent/stable", agentPath, "root", "root") @@ -46,9 +44,9 @@ func (s *installScriptDatabricksSuite) TestDatabricksWorkerInstallScript() { func (s *installScriptDatabricksSuite) TestDatabricksDriverInstallScript() { s.RunInstallScript(s.url, "DB_IS_DRIVER=TRUE") state := s.host.State() - agentPath := fmt.Sprintf("/opt/datadog-packages/datadog-agent/%s", databricksAgentVersion) - javaPath := fmt.Sprintf("/opt/datadog-packages/datadog-apm-library-java/%s", databricksApmLibraryJavaVersion) - injectPath := fmt.Sprintf("/opt/datadog-packages/datadog-apm-inject/%s", databricksApmInjectVersion) + agentPath := "/opt/datadog-packages/datadog-agent/" + databricksAgentVersion + javaPath := "/opt/datadog-packages/datadog-apm-library-java/" + databricksApmLibraryJavaVersion + injectPath := "/opt/datadog-packages/datadog-apm-inject/" + databricksApmInjectVersion state.AssertDirExists(agentPath, 0755, "dd-agent", "dd-agent") state.AssertSymlinkExists("/opt/datadog-packages/datadog-agent/stable", agentPath, "root", "root") diff --git a/test/new-e2e/tests/installer/script/default_script_test.go b/test/new-e2e/tests/installer/script/default_script_test.go index d9c05bfc41d039..a8bcd8962d9d38 100644 --- a/test/new-e2e/tests/installer/script/default_script_test.go +++ b/test/new-e2e/tests/installer/script/default_script_test.go @@ -119,7 +119,7 @@ func (s *installScriptDefaultSuite) TestInstallParity() { if s.os.Flavor == e2eos.CentOS && s.os.Version == e2eos.CentOS7.Version { s.Env().RemoteHost.MustExecute("sudo systemctl daemon-reexec") } - _, err := s.Env().RemoteHost.Execute(fmt.Sprintf(`%s bash -c "$(curl -L 
https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)"`, strings.Join(params, " ")), client.WithEnvVariables(map[string]string{ + _, err := s.Env().RemoteHost.Execute(strings.Join(params, " ")+" bash -c \"$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)\"", client.WithEnvVariables(map[string]string{ "DD_API_KEY": installer.GetAPIKey(), "TESTING_KEYS_URL": "apttesting.datad0g.com/test-keys", "TESTING_APT_URL": fmt.Sprintf("s3.amazonaws.com/apttesting.datad0g.com/datadog-agent/pipeline-%s-a7", os.Getenv("E2E_PIPELINE_ID")), diff --git a/test/new-e2e/tests/installer/unix/all_packages_test.go b/test/new-e2e/tests/installer/unix/all_packages_test.go index ba3fc322d57daa..d22892bbe9fa09 100644 --- a/test/new-e2e/tests/installer/unix/all_packages_test.go +++ b/test/new-e2e/tests/installer/unix/all_packages_test.go @@ -201,12 +201,12 @@ func (s *packageBaseSuite) updateCurlOnUbuntu() { func (s *packageBaseSuite) RunInstallScriptProdOci(params ...string) error { env := map[string]string{} installScriptPackageManagerEnv(env, s.arch) - _, err := s.Env().RemoteHost.Execute(fmt.Sprintf(`%s bash -c "$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)"`, strings.Join(params, " ")), client.WithEnvVariables(env)) + _, err := s.Env().RemoteHost.Execute(strings.Join(params, " ")+" bash -c \"$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)\"", client.WithEnvVariables(env)) return err } func (s *packageBaseSuite) RunInstallScriptWithError(params ...string) error { - _, err := s.Env().RemoteHost.Execute(fmt.Sprintf(`%s bash -c "$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)"`, strings.Join(params, " ")), client.WithEnvVariables(InstallScriptEnv(s.arch))) + _, err := s.Env().RemoteHost.Execute(strings.Join(params, " ")+" bash -c \"$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)\"", client.WithEnvVariables(InstallScriptEnv(s.arch))) 
return err } @@ -229,7 +229,7 @@ func (s *packageBaseSuite) RunInstallScript(params ...string) { (s.os.Flavor == e2eos.CentOS && s.os.Version == e2eos.CentOS7.Version) { s.T().Skip("Ansible doesn't install support Python2 anymore") } else { - _, err = s.Env().RemoteHost.Execute(fmt.Sprintf("%sansible-galaxy collection install -vvv datadog.dd", ansiblePrefix)) + _, err = s.Env().RemoteHost.Execute(ansiblePrefix + "ansible-galaxy collection install -vvv datadog.dd") } if err == nil { break @@ -266,7 +266,7 @@ func envForceVersion(pkg, version string) string { func (s *packageBaseSuite) Purge() { // Reset the systemctl failed counter, best effort as they may not be loaded for _, service := range []string{agentUnit, agentUnitXP, traceUnit, traceUnitXP, processUnit, processUnitXP, probeUnit, probeUnitXP, securityUnit, securityUnitXP, ddotUnit, ddotUnitXP, dataPlaneUnit, dataPlaneUnitXP} { - s.Env().RemoteHost.Execute(fmt.Sprintf("sudo systemctl reset-failed %s", service)) + s.Env().RemoteHost.Execute("sudo systemctl reset-failed " + service) } // Unfortunately no guarantee that the datadog-installer symlink exists @@ -419,10 +419,12 @@ func (s *packageBaseSuite) writeAnsiblePlaybook(env map[string]string, params .. 
playbookStringSuffix += fmt.Sprintf(" datadog_yum_repo: \"https://%s/%s/%s/\"\n", defaultRepoEnv["TESTING_YUM_URL"], defaultRepoEnv["TESTING_YUM_VERSION_PATH"], archi) } if len(environments) > 0 { - playbookStringPrefix += " environment:\n" + var envBuilder strings.Builder + envBuilder.WriteString(" environment:\n") for _, env := range environments { - playbookStringPrefix += fmt.Sprintf(" %s\n", env) + fmt.Fprintf(&envBuilder, " %s\n", env) } + playbookStringPrefix += envBuilder.String() } playbookString := playbookStringPrefix + playbookStringSuffix diff --git a/test/new-e2e/tests/installer/unix/package_agent_test.go b/test/new-e2e/tests/installer/unix/package_agent_test.go index 97f10d6abd7bff..b6d0f94339c80c 100644 --- a/test/new-e2e/tests/installer/unix/package_agent_test.go +++ b/test/new-e2e/tests/installer/unix/package_agent_test.go @@ -60,7 +60,7 @@ func (s *packageAgentSuite) TestInstall() { agentVersion := s.host.AgentStableVersion() agentDir := "/opt/datadog-agent" - agentRunSymlink := fmt.Sprintf("/opt/datadog-packages/run/datadog-agent/%s", agentVersion) + agentRunSymlink := "/opt/datadog-packages/run/datadog-agent/" + agentVersion installerSymlink := path.Join(agentDir, "embedded/bin/installer") agentSymlink := path.Join(agentDir, "bin/agent/agent") @@ -300,7 +300,7 @@ func (s *packageAgentSuite) TestExperimentStopped() { // stop experiment timestamp = s.host.LastJournaldTimestamp() - s.host.Run(fmt.Sprintf(`sudo systemctl %s`, stopCommand)) + s.host.Run("sudo systemctl " + stopCommand) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents(). 
// stop order diff --git a/test/new-e2e/tests/installer/unix/package_apm_inject_test.go b/test/new-e2e/tests/installer/unix/package_apm_inject_test.go index eb27b98b679128..f45301b2a5a970 100644 --- a/test/new-e2e/tests/installer/unix/package_apm_inject_test.go +++ b/test/new-e2e/tests/installer/unix/package_apm_inject_test.go @@ -9,6 +9,7 @@ import ( "fmt" "math/rand/v2" "path/filepath" + "strconv" "strings" "time" @@ -65,9 +66,9 @@ func (s *packageApmInjectSuite) TestInstall() { s.assertStableConfig(map[string]interface{}{}) traceID := rand.Uint64() - s.host.CallExamplePythonApp(fmt.Sprint(traceID)) + s.host.CallExamplePythonApp(strconv.FormatUint(traceID, 10)) traceIDDocker := rand.Uint64() - s.host.CallExamplePythonAppInDocker(fmt.Sprint(traceIDDocker)) + s.host.CallExamplePythonAppInDocker(strconv.FormatUint(traceIDDocker, 10)) s.assertTraceReceived(traceID) s.assertTraceReceived(traceIDDocker) @@ -256,7 +257,7 @@ func (s *packageApmInjectSuite) TestVersionBump() { defer s.host.StopExamplePythonApp() traceID := rand.Uint64() - s.host.CallExamplePythonApp(fmt.Sprint(traceID)) + s.host.CallExamplePythonApp(strconv.FormatUint(traceID, 10)) s.assertTraceReceived(traceID) // Re-run the install script with the latest tracer version @@ -281,9 +282,9 @@ func (s *packageApmInjectSuite) TestVersionBump() { defer s.host.StopExamplePythonAppInDocker() traceID = rand.Uint64() - s.host.CallExamplePythonApp(fmt.Sprint(traceID)) + s.host.CallExamplePythonApp(strconv.FormatUint(traceID, 10)) traceIDDocker := rand.Uint64() - s.host.CallExamplePythonAppInDocker(fmt.Sprint(traceIDDocker)) + s.host.CallExamplePythonAppInDocker(strconv.FormatUint(traceIDDocker, 10)) s.assertTraceReceived(traceID) s.assertTraceReceived(traceIDDocker) diff --git a/test/new-e2e/tests/installer/unix/package_ddot_test.go b/test/new-e2e/tests/installer/unix/package_ddot_test.go index 044709d2420a6b..416b2d275b6935 100644 --- a/test/new-e2e/tests/installer/unix/package_ddot_test.go +++ 
b/test/new-e2e/tests/installer/unix/package_ddot_test.go @@ -52,7 +52,7 @@ func (s *packageDDOTSuite) RunInstallScriptWithError(params ...string) error { return err } - _, err := s.Env().RemoteHost.Execute(fmt.Sprintf(`%s bash -c "$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)"`, strings.Join(params, " ")), client.WithEnvVariables(InstallScriptEnv(s.arch))) + _, err := s.Env().RemoteHost.Execute(strings.Join(params, " ")+" bash -c \"$(curl -L https://dd-agent.s3.amazonaws.com/scripts/install_script_agent7.sh)\"", client.WithEnvVariables(InstallScriptEnv(s.arch))) return err } @@ -105,7 +105,7 @@ func (s *packageDDOTSuite) TestInstallDDOTInstaller() { s.host.WaitForUnitActive(s.T(), agentUnit, traceUnit) // Install ddot - s.host.Run(fmt.Sprintf("sudo datadog-installer install oci://installtesting.datad0g.com.internal.dda-testing.com/ddot-package:pipeline-%s", os.Getenv("E2E_PIPELINE_ID"))) + s.host.Run("sudo datadog-installer install oci://installtesting.datad0g.com.internal.dda-testing.com/ddot-package:pipeline-" + os.Getenv("E2E_PIPELINE_ID")) s.host.AssertPackageInstalledByInstaller("datadog-agent-ddot") // Check if datadog.yaml exists, if not return an error diff --git a/test/new-e2e/tests/installer/unix/package_definitions.go b/test/new-e2e/tests/installer/unix/package_definitions.go index b5cdee064edd38..05416bf19cc19e 100644 --- a/test/new-e2e/tests/installer/unix/package_definitions.go +++ b/test/new-e2e/tests/installer/unix/package_definitions.go @@ -102,13 +102,13 @@ func installScriptInstallerEnv(env map[string]string, packagesConfig []TestPacka name := strings.ToUpper(strings.ReplaceAll(pkg.Name, "-", "_")) image := strings.TrimPrefix(name, "DATADOG_") + "_PACKAGE" if pkg.Registry != "" { - env[fmt.Sprintf("DD_INSTALLER_REGISTRY_URL_%s", image)] = pkg.Registry + env["DD_INSTALLER_REGISTRY_URL_"+image] = pkg.Registry } if pkg.Auth != "" { - env[fmt.Sprintf("DD_INSTALLER_REGISTRY_AUTH_%s", image)] = pkg.Auth + 
env["DD_INSTALLER_REGISTRY_AUTH_"+image] = pkg.Auth } if pkg.Version != "" && pkg.Version != "latest" { - env[fmt.Sprintf("DD_INSTALLER_DEFAULT_PKG_VERSION_%s", name)] = pkg.Version + env["DD_INSTALLER_DEFAULT_PKG_VERSION_"+name] = pkg.Version } } } @@ -151,7 +151,7 @@ func GetAPIKey() string { // PipelineAgentVersion returns the version of the pipeline agent func PipelineAgentVersion(t *testing.T) string { - ref := fmt.Sprintf("installtesting.datad0g.com/agent-package:pipeline-%s", os.Getenv("E2E_PIPELINE_ID")) + ref := "installtesting.datad0g.com/agent-package:pipeline-" + os.Getenv("E2E_PIPELINE_ID") p := v1.Platform{ OS: "linux", Architecture: "amd64", diff --git a/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go b/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go index 2b0aabea714429..ed087424c3bd17 100644 --- a/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go +++ b/test/new-e2e/tests/installer/unix/upgrade_scenario_test.go @@ -89,7 +89,7 @@ func (s *upgradeScenarioSuite) testCatalog() catalog { { Package: string(datadogAgent), Version: s.pipelineAgentVersion, - URL: fmt.Sprintf("oci://installtesting.datad0g.com.internal.dda-testing.com/agent-package:pipeline-%s", os.Getenv("E2E_PIPELINE_ID")), + URL: "oci://installtesting.datad0g.com.internal.dda-testing.com/agent-package:pipeline-" + os.Getenv("E2E_PIPELINE_ID"), }, { Package: string(datadogApmInject), @@ -122,8 +122,8 @@ func (s *upgradeScenarioSuite) TestUpgradeSuccessfulFromDebRPM() { currentVersion := s.getInstallerStatus().Packages.States["datadog-agent"].Stable // Assert stable symlink exists properly state := s.host.State() - state.AssertSymlinkExists("/opt/datadog-packages/datadog-agent/stable", fmt.Sprintf("/opt/datadog-packages/run/datadog-agent/%s", currentVersion), "root", "root") - state.AssertSymlinkExists(fmt.Sprintf("/opt/datadog-packages/run/datadog-agent/%s", currentVersion), "/opt/datadog-agent", "root", "root") + 
state.AssertSymlinkExists("/opt/datadog-packages/datadog-agent/stable", "/opt/datadog-packages/run/datadog-agent/"+currentVersion, "root", "root") + state.AssertSymlinkExists("/opt/datadog-packages/run/datadog-agent/"+currentVersion, "/opt/datadog-agent", "root", "root") // Set remote_updates to true in datadog.yaml s.Env().RemoteHost.MustExecute(`printf "\nremote_updates: true\n" | sudo tee -a /etc/datadog-agent/datadog.yaml`) @@ -144,8 +144,8 @@ func (s *upgradeScenarioSuite) TestUpgradeSuccessfulFromDebRPM() { // Assert stable symlink still exists properly state = s.host.State() - state.AssertSymlinkExists("/opt/datadog-packages/datadog-agent/stable", fmt.Sprintf("/opt/datadog-packages/run/datadog-agent/%s", currentVersion), "root", "root") - state.AssertSymlinkExists(fmt.Sprintf("/opt/datadog-packages/run/datadog-agent/%s", currentVersion), "/opt/datadog-agent", "root", "root") + state.AssertSymlinkExists("/opt/datadog-packages/datadog-agent/stable", "/opt/datadog-packages/run/datadog-agent/"+currentVersion, "root", "root") + state.AssertSymlinkExists("/opt/datadog-packages/run/datadog-agent/"+currentVersion, "/opt/datadog-agent", "root", "root") timestamp = s.host.LastJournaldTimestamp() s.promoteExperiment(datadogAgent) @@ -222,7 +222,7 @@ func (s *upgradeScenarioSuite) TestExperimentCurrentVersion() { { Package: "datadog-agent", Version: currentVersion, - URL: fmt.Sprintf("oci://dd-agent.s3.amazonaws.com/agent-package:%s", currentVersion), + URL: "oci://dd-agent.s3.amazonaws.com/agent-package:" + currentVersion, }, }, } diff --git a/test/new-e2e/tests/installer/windows/base_suite.go b/test/new-e2e/tests/installer/windows/base_suite.go index 7c2462e67cf59d..71b0c1855a3293 100644 --- a/test/new-e2e/tests/installer/windows/base_suite.go +++ b/test/new-e2e/tests/installer/windows/base_suite.go @@ -227,8 +227,8 @@ func (s *BaseSuite) createStableAgent() { // // see doc.go for more information func (s *BaseSuite) getAgentVersionVars(prefix string) (string, string) 
{ - versionVar := fmt.Sprintf("%s_VERSION", prefix) - versionPackageVar := fmt.Sprintf("%s_VERSION_PACKAGE", prefix) + versionVar := prefix + "_VERSION" + versionPackageVar := prefix + "_VERSION_PACKAGE" // Agent version version := os.Getenv(versionVar) @@ -291,7 +291,7 @@ func (s *BaseSuite) AfterTest(suiteName, testName string) { for _, logName := range []string{"System", "Application"} { // collect the full event log as an evtx file s.T().Logf("Exporting %s event log", logName) - outputPath := filepath.Join(s.SessionOutputDir(), fmt.Sprintf("%s.evtx", logName)) + outputPath := filepath.Join(s.SessionOutputDir(), logName+".evtx") err := windowscommon.ExportEventLog(vm, logName, outputPath) s.Assert().NoError(err, "should export %s event log", logName) // Log errors and warnings to the screen for easy access diff --git a/test/new-e2e/tests/installer/windows/installer.go b/test/new-e2e/tests/installer/windows/installer.go index f9f76915505f66..13c2b72337fd5f 100644 --- a/test/new-e2e/tests/installer/windows/installer.go +++ b/test/new-e2e/tests/installer/windows/installer.go @@ -7,6 +7,7 @@ package installer import ( "encoding/json" + "errors" "fmt" "os" "path" @@ -217,12 +218,12 @@ func (d *DatadogInstaller) InstallExperiment(packageName string, opts ...install // RemovePackage requests that the Datadog Installer removes a package on the remote host. func (d *DatadogInstaller) RemovePackage(packageName string) (string, error) { - return d.execute(fmt.Sprintf("remove %s", packageName)) + return d.execute("remove " + packageName) } // RemoveExperiment requests that the Datadog Installer removes a package on the remote host. 
func (d *DatadogInstaller) RemoveExperiment(packageName string) (string, error) { - return d.execute(fmt.Sprintf("remove-experiment %s", packageName)) + return d.execute("remove-experiment " + packageName) } // Status returns the status provided by the running daemon @@ -269,7 +270,7 @@ func (d *DatadogInstaller) Install(opts ...MsiOption) error { d.env.RemoteHost.CopyFile(localMSIPath, remoteMSIPath) } if remoteMSIPath == "" { - return fmt.Errorf("MSI URL/path is required but was not provided") + return errors.New("MSI URL/path is required but was not provided") } logPath := filepath.Join(d.outputDir, params.msiLogFilename) if _, err := os.Stat(logPath); err == nil { @@ -350,7 +351,7 @@ func CreatePackageSourceIfLocal(host *components.RemoteHost, pkg TestPackageConf } // Must replace slashes so that daemon can parse it correctly outPath = strings.ReplaceAll(outPath, "\\", "/") - pkg.urloverride = fmt.Sprintf("file://%s", outPath) + pkg.urloverride = "file://" + outPath } return pkg, nil } @@ -468,7 +469,7 @@ func WithURLOverride(url string) PackageOption { // WithPipeline configures the package to be installed from a pipeline. 
func WithPipeline(pipeline string) PackageOption { return func(params *TestPackageConfig) error { - params.Version = fmt.Sprintf("pipeline-%s", pipeline) + params.Version = "pipeline-" + pipeline if err := WithRegistry(consts.PipelineOCIRegistry)(params); err != nil { return err } @@ -505,13 +506,13 @@ func WithPackage(pkg TestPackageConfig) PackageOption { func WithDevEnvOverrides(prefix string) PackageOption { return func(params *TestPackageConfig) error { // env vars for convenience - if url, ok := os.LookupEnv(fmt.Sprintf("%s_OCI_URL", prefix)); ok { + if url, ok := os.LookupEnv(prefix + "_OCI_URL"); ok { err := WithURLOverride(url)(params) if err != nil { return err } } - if pipeline, ok := os.LookupEnv(fmt.Sprintf("%s_OCI_PIPELINE", prefix)); ok { + if pipeline, ok := os.LookupEnv(prefix + "_OCI_PIPELINE"); ok { err := WithPipeline(pipeline)(params) if err != nil { return err @@ -519,19 +520,19 @@ func WithDevEnvOverrides(prefix string) PackageOption { } // env vars for specific fields - if version, ok := os.LookupEnv(fmt.Sprintf("%s_OCI_VERSION", prefix)); ok { + if version, ok := os.LookupEnv(prefix + "_OCI_VERSION"); ok { err := WithVersion(version)(params) if err != nil { return err } } - if registry, ok := os.LookupEnv(fmt.Sprintf("%s_OCI_REGISTRY", prefix)); ok { + if registry, ok := os.LookupEnv(prefix + "_OCI_REGISTRY"); ok { err := WithRegistry(registry)(params) if err != nil { return err } } - if auth, ok := os.LookupEnv(fmt.Sprintf("%s_OCI_AUTH", prefix)); ok { + if auth, ok := os.LookupEnv(prefix + "_OCI_AUTH"); ok { err := WithAuthentication(auth)(params) if err != nil { return err @@ -581,12 +582,12 @@ func (d *DatadogInstaller) StartConfigExperiment(packageName string, config Conf // PromoteConfigExperiment promotes a config experiment through the daemon. 
func (d *DatadogInstaller) PromoteConfigExperiment(packageName string) (string, error) { - return d.execute(fmt.Sprintf("daemon promote-config-experiment %s", packageName)) + return d.execute("daemon promote-config-experiment " + packageName) } // StopConfigExperiment stops a config experiment through the daemon. func (d *DatadogInstaller) StopConfigExperiment(packageName string) (string, error) { - return d.execute(fmt.Sprintf("daemon stop-config-experiment %s", packageName)) + return d.execute("daemon stop-config-experiment " + packageName) } // ConfigExperiment represents a configuration experiment for the Datadog Installer. diff --git a/test/new-e2e/tests/installer/windows/params.go b/test/new-e2e/tests/installer/windows/params.go index 01a8caf7fcef2b..932dceaaa9f220 100644 --- a/test/new-e2e/tests/installer/windows/params.go +++ b/test/new-e2e/tests/installer/windows/params.go @@ -6,7 +6,6 @@ package installer import ( - "fmt" "os" "strings" @@ -154,19 +153,19 @@ func WithMSILogFile(filename string) MsiOption { // export CURRENT_AGENT_MSI_URL="https://s3.amazonaws.com/dd-agent-mstesting/builds/beta/ddagent-cli-7.64.0-rc.9.msi" func WithMSIDevEnvOverrides(prefix string) MsiOption { return func(params *MsiParams) error { - if url, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_URL", prefix)); ok { + if url, ok := os.LookupEnv(prefix + "_MSI_URL"); ok { err := WithOption(WithInstallerURL(url))(params) if err != nil { return err } } - if pipeline, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_PIPELINE", prefix)); ok { + if pipeline, ok := os.LookupEnv(prefix + "_MSI_PIPELINE"); ok { err := WithOption(WithURLFromPipeline(pipeline))(params) if err != nil { return err } } - if version, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_VERSION", prefix)); ok { + if version, ok := os.LookupEnv(prefix + "_MSI_VERSION"); ok { err := WithOption(WithURLFromInstallersJSON(pipeline.StableURL, version))(params) if err != nil { return err @@ -187,13 +186,13 @@ func WithMSIDevEnvOverrides(prefix string) 
MsiOption { // export CURRENT_AGENT_INSTALLER_SCRIPT="file:///path/to/install.ps1" func WithInstallScriptDevEnvOverrides(prefix string) Option { return func(params *Params) error { - if url, ok := os.LookupEnv(fmt.Sprintf("%s_INSTALLER_URL", prefix)); ok { + if url, ok := os.LookupEnv(prefix + "_INSTALLER_URL"); ok { err := WithInstallerURL(url)(params) if err != nil { return err } } - if script, ok := os.LookupEnv(fmt.Sprintf("%s_INSTALLER_SCRIPT", prefix)); ok { + if script, ok := os.LookupEnv(prefix + "_INSTALLER_SCRIPT"); ok { err := WithInstallerScript(script)(params) if err != nil { return err diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/domain_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/domain_test.go index 9de4623a41b9cd..c47dd8da029058 100644 --- a/test/new-e2e/tests/installer/windows/suites/agent-package/domain_test.go +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/domain_test.go @@ -6,7 +6,6 @@ package agenttests import ( - "fmt" "os" "github.com/DataDog/datadog-agent/test/e2e-framework/components/activedirectory" @@ -53,8 +52,8 @@ func (s *testAgentUpgradeOnDCSuite) TestUpgradeMSI() { // Install the stable MSI artifact s.installPreviousAgentVersion( - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_NAME=%s", TestUser)), - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_PASSWORD=%s", TestPassword)), + installerwindows.WithMSIArg("DDAGENTUSER_NAME="+TestUser), + installerwindows.WithMSIArg("DDAGENTUSER_PASSWORD="+TestPassword), ) s.AssertSuccessfulAgentPromoteExperiment(s.StableAgentVersion().PackageVersion()) @@ -90,8 +89,8 @@ func (s *testAgentUpgradeOnDCSuite) TestUpgradeAgentPackage() { // Install the stable MSI artifact s.installPreviousAgentVersion( - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_NAME=%s", TestUser)), - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_PASSWORD=%s", TestPassword)), + installerwindows.WithMSIArg("DDAGENTUSER_NAME="+TestUser), + 
installerwindows.WithMSIArg("DDAGENTUSER_PASSWORD="+TestPassword), ) s.AssertSuccessfulAgentPromoteExperiment(s.StableAgentVersion().PackageVersion()) @@ -159,8 +158,8 @@ func (s *testUpgradeWithMissingPasswordSuite) TestUpgradeWithMissingPassword() { options := []installerwindows.MsiOption{ installerwindows.WithOption(installerwindows.WithInstallerURL(s.StableAgentVersion().MSIPackage().URL)), installerwindows.WithMSILogFile("install-previous-version.log"), - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_NAME=%s", TestUser)), - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_PASSWORD=%s", TestPassword)), + installerwindows.WithMSIArg("DDAGENTUSER_NAME=" + TestUser), + installerwindows.WithMSIArg("DDAGENTUSER_PASSWORD=" + TestPassword), } s.Require().NoError(s.Installer().Install(options...)) s.Require().Host(s.Env().RemoteHost). diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/gmsa_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/gmsa_test.go index 47029c61e417de..090a25d5ae2a80 100644 --- a/test/new-e2e/tests/installer/windows/suites/agent-package/gmsa_test.go +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/gmsa_test.go @@ -67,7 +67,7 @@ func (s *testAgentUpgradeOnDCWithGMSASuite) TestUpgradeMSI() { // Install the stable MSI artifact s.installPreviousAgentVersion( - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_NAME=%s", TestGMSAUser)), + installerwindows.WithMSIArg("DDAGENTUSER_NAME=" + TestGMSAUser), ) s.AssertSuccessfulAgentPromoteExperiment(s.StableAgentVersion().PackageVersion()) @@ -102,7 +102,7 @@ func (s *testAgentUpgradeOnDCWithGMSASuite) TestUpgradeAgentPackage() { // Install the stable MSI artifact s.installPreviousAgentVersion( - installerwindows.WithMSIArg(fmt.Sprintf("DDAGENTUSER_NAME=%s", TestGMSAUser)), + installerwindows.WithMSIArg("DDAGENTUSER_NAME=" + TestGMSAUser), ) s.AssertSuccessfulAgentPromoteExperiment(s.StableAgentVersion().PackageVersion()) @@ -154,7 +154,7 @@ 
func createGMSAAccount(host *components.RemoteHost, accountName, domain string) userWithoutSuffix := strings.TrimSuffix(accountName, "$") // Check if the gMSA account already exists - checkCmd := fmt.Sprintf("Get-ADServiceAccount -Identity %s", userWithoutSuffix) + checkCmd := "Get-ADServiceAccount -Identity " + userWithoutSuffix _, err := host.Execute(checkCmd) if err == nil { // Account already exists, skip creation diff --git a/test/new-e2e/tests/installer/windows/suites/apm-inject-package/msi_install_test.go b/test/new-e2e/tests/installer/windows/suites/apm-inject-package/msi_install_test.go index d7eee0e73c0668..d1a48a0a21807b 100644 --- a/test/new-e2e/tests/installer/windows/suites/apm-inject-package/msi_install_test.go +++ b/test/new-e2e/tests/installer/windows/suites/apm-inject-package/msi_install_test.go @@ -6,8 +6,6 @@ package injecttests import ( - "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" winawshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host/windows" installer "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/unix" @@ -40,7 +38,7 @@ func (s *testAgentMSIInstallsAPMInject) TestInstallFromMSI() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=host"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_INSTALLER_DEFAULT_PKG_VERSION_DATADOG_APM_INJECT=%s", s.currentAPMInjectVersion.PackageVersion())), + installerwindows.WithMSIArg("DD_INSTALLER_DEFAULT_PKG_VERSION_DATADOG_APM_INJECT="+s.currentAPMInjectVersion.PackageVersion()), installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:3,java:1"), installerwindows.WithMSILogFile("install.log"), ) @@ -58,7 +56,7 @@ func (s *testAgentMSIInstallsAPMInject) TestEnableDisable() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=host"), // TODO: remove override once image is published in 
prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_INSTALLER_DEFAULT_PKG_VERSION_DATADOG_APM_INJECT=%s", s.currentAPMInjectVersion.PackageVersion())), + installerwindows.WithMSIArg("DD_INSTALLER_DEFAULT_PKG_VERSION_DATADOG_APM_INJECT="+s.currentAPMInjectVersion.PackageVersion()), installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:3,java:1"), installerwindows.WithMSILogFile("install.log"), ) @@ -86,7 +84,7 @@ func (s *testAgentMSIInstallsAPMInject) installCurrentAgentVersion(opts ...insta options := []installerwindows.MsiOption{ installerwindows.WithOption(installerwindows.WithInstallerURL(s.CurrentAgentVersion().MSIPackage().URL)), installerwindows.WithMSILogFile("install-current-version.log"), - installerwindows.WithMSIArg(fmt.Sprintf("APIKEY=%s", installer.GetAPIKey())), + installerwindows.WithMSIArg("APIKEY=" + installer.GetAPIKey()), installerwindows.WithMSIArg("SITE=datadoghq.com"), } options = append(options, opts...) 
diff --git a/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_no_iis_test.go b/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_no_iis_test.go index 6427236b3fb2cb..ca82ce25a2bf96 100644 --- a/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_no_iis_test.go +++ b/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_no_iis_test.go @@ -6,8 +6,6 @@ package dotnettests import ( - "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" winawshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/host/windows" installer "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/unix" @@ -46,7 +44,7 @@ func (s *testDotnetLibraryInstallSuiteWithoutIIS) TestMSIInstallDotnetLibraryFai installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version)), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version), installerwindows.WithMSILogFile("install-rollback.log"), )) defer s.Installer().Purge() diff --git a/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_script_test.go b/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_script_test.go index 90f2cd1c7c89a8..a4ed68e9a62306 100644 --- a/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_script_test.go +++ b/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/install_script_test.go @@ -58,7 +58,7 @@ func (s *testAgentScriptInstallsDotnetLibrary) TestInstallFromScript() { "DD_APM_INSTRUMENTATION_ENABLED": "iis", // TODO: remove override once image is published in prod "DD_INSTALLER_REGISTRY_URL": 
"install.datad0g.com.internal.dda-testing.com", - "DD_APM_INSTRUMENTATION_LIBRARIES": fmt.Sprintf("dotnet:%s", version.Version()), + "DD_APM_INSTRUMENTATION_LIBRARIES": "dotnet:" + version.Version(), }), ) // Start the IIS app to load the library diff --git a/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/msi_install_test.go b/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/msi_install_test.go index 357938f75821c0..969ef5c82ab965 100644 --- a/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/msi_install_test.go +++ b/test/new-e2e/tests/installer/windows/suites/apm-library-dotnet-package/msi_install_test.go @@ -7,7 +7,6 @@ package dotnettests import ( _ "embed" - "fmt" "os" "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" @@ -61,7 +60,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestInstallFromMSI() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install.log"), ) // Start the IIS app to load the library @@ -88,7 +87,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestUpgradeWithMSI() { // TODO: support DD_INSTALLER_REGISTRY_URL installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), // TODO: update to use Version() when stable is updated - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", oldVersion.PackageVersion())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+oldVersion.PackageVersion()), installerwindows.WithMSILogFile("install.log"), ) @@ -107,7 +106,7 @@ func 
(s *testAgentMSIInstallsDotnetLibrary) TestUpgradeWithMSI() { // TODO: remove override once image is published in prod // TODO: support DD_INSTALLER_REGISTRY_URL installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", newVersion.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+newVersion.Version()), installerwindows.WithMSILogFile("upgrade.log"), ) @@ -139,7 +138,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestMSIRollbackRemovesLibrary() { // TODO: remove override once image is published in prod // TODO: support DD_INSTALLER_REGISTRY_URL installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install-rollback.log"), installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"), ) @@ -164,7 +163,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestMSISkipRollbackIfInstalled() { // TODO: support DD_INSTALLER_REGISTRY_URL installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), // TODO: update to use Version() when stable is updated - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", oldVersion.PackageVersion())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+oldVersion.PackageVersion()), installerwindows.WithMSILogFile("install.log"), ) @@ -174,7 +173,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestMSISkipRollbackIfInstalled() { // TODO: remove override once image is published in prod // TODO: support DD_INSTALLER_REGISTRY_URL 
installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", newVersion.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+newVersion.Version()), installerwindows.WithMSILogFile("install-rollback.log"), installerwindows.WithMSIArg("WIXFAILWHENDEFERRED=1"), ) @@ -196,7 +195,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestUninstallKeepsLibrary() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install.log"), ) @@ -226,7 +225,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestUninstallScript() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install.log"), ) // Start the IIS app to load the library @@ -259,7 +258,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestMSIPurge() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - 
installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install.log"), ) // Start the IIS app to load the library @@ -295,7 +294,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestMSIPurgeDisabled() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install.log"), ) // Start the IIS app to load the library @@ -334,7 +333,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) TestDisableEnableScript() { installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_ENABLED=iis"), // TODO: remove override once image is published in prod installerwindows.WithMSIArg("DD_INSTALLER_REGISTRY_URL=install.datad0g.com.internal.dda-testing.com"), - installerwindows.WithMSIArg(fmt.Sprintf("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:%s", version.Version())), + installerwindows.WithMSIArg("DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:"+version.Version()), installerwindows.WithMSILogFile("install.log"), ) // Start the IIS app to load the library @@ -371,7 +370,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) installPreviousAgentVersion(opts ... 
options := []installerwindows.MsiOption{ installerwindows.WithOption(installerwindows.WithInstallerURL(s.StableAgentVersion().MSIPackage().URL)), installerwindows.WithMSILogFile("install-previous-version.log"), - installerwindows.WithMSIArg(fmt.Sprintf("APIKEY=%s", installer.GetAPIKey())), + installerwindows.WithMSIArg("APIKEY=" + installer.GetAPIKey()), installerwindows.WithMSIArg("SITE=datadoghq.com"), } options = append(options, opts...) @@ -392,7 +391,7 @@ func (s *testAgentMSIInstallsDotnetLibrary) installCurrentAgentVersion(opts ...i options := []installerwindows.MsiOption{ installerwindows.WithOption(installerwindows.WithInstallerURL(s.CurrentAgentVersion().MSIPackage().URL)), installerwindows.WithMSILogFile("install-current-version.log"), - installerwindows.WithMSIArg(fmt.Sprintf("APIKEY=%s", installer.GetAPIKey())), + installerwindows.WithMSIArg("APIKEY=" + installer.GetAPIKey()), installerwindows.WithMSIArg("SITE=datadoghq.com"), } options = append(options, opts...) diff --git a/test/new-e2e/tests/language-detection/language_detection_test.go b/test/new-e2e/tests/language-detection/language_detection_test.go index b1286e8e52e521..086bdaad29d652 100644 --- a/test/new-e2e/tests/language-detection/language_detection_test.go +++ b/test/new-e2e/tests/language-detection/language_detection_test.go @@ -72,7 +72,7 @@ func (s *languageDetectionSuite) SetupSuite() { } func (s *languageDetectionSuite) checkDetectedLanguage(pid string, language string, source string) { - s.Env().RemoteHost.MustExecute(fmt.Sprintf("kill -0 %s", pid)) // check PID refers to an existing, signalable process + s.Env().RemoteHost.MustExecute("kill -0 " + pid) // check PID refers to an existing, signalable process var actualLanguage string var err error @@ -86,7 +86,7 @@ func (s *languageDetectionSuite) checkDetectedLanguage(pid string, language stri pid, language, actualLanguage, err), ) - s.Env().RemoteHost.MustExecute(fmt.Sprintf("kill -SIGTERM %s", pid)) + 
s.Env().RemoteHost.MustExecute("kill -SIGTERM " + pid) } func (s *languageDetectionSuite) getLanguageForPid(pid string, source string) (string, error) { diff --git a/test/new-e2e/tests/ndm/snmp/snmp_test.go b/test/new-e2e/tests/ndm/snmp/snmp_test.go index 8d44b3d3b373c1..8cb01eaadd3a99 100644 --- a/test/new-e2e/tests/ndm/snmp/snmp_test.go +++ b/test/new-e2e/tests/ndm/snmp/snmp_test.go @@ -8,7 +8,6 @@ package snmp import ( "embed" - "fmt" "path" "testing" "time" @@ -180,7 +179,7 @@ func (s *snmpDockerSuite) TestSnmpTagsAreStoredOnRestart() { _, err = s.Env().RemoteHost.Execute("docker stop dd-snmp") require.NoError(s.T(), err) - _, err = s.Env().RemoteHost.Execute(fmt.Sprintf("docker restart %s", s.Env().Agent.ContainerName)) + _, err = s.Env().RemoteHost.Execute("docker restart " + s.Env().Agent.ContainerName) require.NoError(s.T(), err) err = fakeintake.FlushServerAndResetAggregators() diff --git a/test/new-e2e/tests/netpath/network-path-integration/common_test.go b/test/new-e2e/tests/netpath/network-path-integration/common_test.go index f0f0deb368dbe5..ec07417da33e27 100644 --- a/test/new-e2e/tests/netpath/network-path-integration/common_test.go +++ b/test/new-e2e/tests/netpath/network-path-integration/common_test.go @@ -9,6 +9,7 @@ package networkpathintegration import ( _ "embed" "encoding/json" + "errors" "fmt" "os" "time" @@ -63,7 +64,7 @@ func (s *baseNetworkPathIntegrationTestSuite) findNetpath(isMatch func(*aggregat return nil, err } if nps == nil { - return nil, fmt.Errorf("GetLatestNetpathEvents() returned nil netpaths") + return nil, errors.New("GetLatestNetpathEvents() returned nil netpaths") } var match *aggregator.Netpath diff --git a/test/new-e2e/tests/npm/cilium_lb_conntracker_test.go b/test/new-e2e/tests/npm/cilium_lb_conntracker_test.go index 30a84ebecb6e31..19482d7739c143 100644 --- a/test/new-e2e/tests/npm/cilium_lb_conntracker_test.go +++ b/test/new-e2e/tests/npm/cilium_lb_conntracker_test.go @@ -8,7 +8,6 @@ package npm import ( "context" 
"encoding/json" - "fmt" "strings" "testing" "time" @@ -42,7 +41,7 @@ func TestCiliumLBConntracker(t *testing.T) { // TODO: find a way to update this list dynamically versionsToTest := []string{"1.15.17", "1.16.10", "1.17.4"} for _, v := range versionsToTest { - t.Run(fmt.Sprintf("version %s", v), func(_t *testing.T) { + t.Run("version "+v, func(_t *testing.T) { _t.Parallel() testCiliumLBConntracker(t, v) @@ -79,9 +78,9 @@ func testCiliumLBConntracker(t *testing.T, ciliumVersion string) { }, } - name := strings.ReplaceAll(fmt.Sprintf("cilium-lb-%s", ciliumVersion), ".", "-") + name := strings.ReplaceAll("cilium-lb-"+ciliumVersion, ".", "-") e2e.Run(t, suite, - e2e.WithStackName(fmt.Sprintf("stack-%s", name)), + e2e.WithStackName("stack-"+name), e2e.WithProvisioner( awskubernetes.KindProvisioner( awskubernetes.WithName(name), diff --git a/test/new-e2e/tests/orchestrator/k8s_test.go b/test/new-e2e/tests/orchestrator/k8s_test.go index 94c953aff0fb2b..d5ac2e953e0bc1 100644 --- a/test/new-e2e/tests/orchestrator/k8s_test.go +++ b/test/new-e2e/tests/orchestrator/k8s_test.go @@ -8,7 +8,6 @@ package orchestrator import ( "context" _ "embed" - "fmt" "strings" "testing" "time" @@ -66,7 +65,7 @@ func (suite *k8sSuite) TestNode() { expectAtLeastOneResource{ filter: &fakeintake.PayloadFilter{ResourceType: agentmodel.TypeCollectorNode}, test: func(payload *aggregator.OrchestratorPayload) bool { - return payload.Node.Metadata.Name == fmt.Sprintf("%s-control-plane", suite.Env().KubernetesCluster.ClusterName) + return payload.Node.Metadata.Name == suite.Env().KubernetesCluster.ClusterName+"-control-plane" }, message: "find a control plane node", timeout: defaultTimeout, diff --git a/test/new-e2e/tests/process/ecs_test.go b/test/new-e2e/tests/process/ecs_test.go index 1644821d0f1406..0ed09b1820ac6e 100644 --- a/test/new-e2e/tests/process/ecs_test.go +++ b/test/new-e2e/tests/process/ecs_test.go @@ -6,7 +6,7 @@ package process import ( - "fmt" + "strconv" "testing" "time" @@ -47,7 +47,7 
@@ func ecsEC2CPUStressProvisioner(runInCoreAgent bool) provisioners.PulumiEnvRunFu ecs.WithECSOptions(tifEcs.WithLinuxNodeGroup()), ecs.WithAgentOptions( ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_PROCESS_COLLECTION_ENABLED", "true"), - ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED", fmt.Sprintf("%t", runInCoreAgent)), + ecsagentparams.WithAgentServiceEnvVariable("DD_PROCESS_CONFIG_RUN_IN_CORE_AGENT_ENABLED", strconv.FormatBool(runInCoreAgent)), ), ecs.WithWorkloadApp(func(e aws.Environment, clusterArn pulumi.StringInput) (*ecsComp.Workload, error) { return cpustress.EcsAppDefinition(e, clusterArn) diff --git a/test/new-e2e/tests/process/testing.go b/test/new-e2e/tests/process/testing.go index 8e138ea3e4d43b..a93f2b666f16ca 100644 --- a/test/new-e2e/tests/process/testing.go +++ b/test/new-e2e/tests/process/testing.go @@ -9,7 +9,6 @@ package process import ( _ "embed" "encoding/json" - "fmt" "strings" "testing" @@ -334,7 +333,7 @@ func assertContainersNotCollected(t *testing.T, payloads []*aggregator.ProcessPa // containers and whether it has the expected data populated func findContainer(name string, containers []*agentmodel.Container) bool { // check if there is a tag for the container. 
The tag could be `container_name:*` or `short_image:*` - containerNameTag := fmt.Sprintf(":%s", name) + containerNameTag := ":" + name for _, container := range containers { for _, tag := range container.Tags { if strings.HasSuffix(tag, containerNameTag) { diff --git a/test/new-e2e/tests/process/windows_test.go b/test/new-e2e/tests/process/windows_test.go index b0b3d4d8795216..666e485b134864 100644 --- a/test/new-e2e/tests/process/windows_test.go +++ b/test/new-e2e/tests/process/windows_test.go @@ -321,5 +321,5 @@ func runWindowsCommand(t *testing.T, remoteHost *components.RemoteHost, cmd []st _ = session.Close() _ = stdin.Close() }) - return fmt.Sprintf("%s.exe", cmd[0]), nil + return cmd[0] + ".exe", nil } diff --git a/test/new-e2e/tests/sysprobe-functional/apmtags_test.go b/test/new-e2e/tests/sysprobe-functional/apmtags_test.go index f7baf1b509a0e6..89b5c91e7ba3a2 100644 --- a/test/new-e2e/tests/sysprobe-functional/apmtags_test.go +++ b/test/new-e2e/tests/sysprobe-functional/apmtags_test.go @@ -320,16 +320,16 @@ func (v *apmvmSuite) TestUSMAutoTaggingSuite() { if test.targetPath != "" { targetpath = test.targetPath } - var envstring string + var envstringBuilder strings.Builder for k, v := range test.clientEnvVars { - envstring += fmt.Sprintf("$Env:%s=\"%s\" ; ", k, v) + fmt.Fprintf(&envstringBuilder, "$Env:%s=\"%s\" ; ", k, v) } - localcmd := fmt.Sprintf(pscommand, envstring, testScript, targetport, targetpath, strings.Join(test.expectedClientTags, ","), strings.Join(test.expectedServerTags, ","), testExe) + localcmd := fmt.Sprintf(pscommand, envstringBuilder.String(), testScript, targetport, targetpath, strings.Join(test.expectedClientTags, ","), strings.Join(test.expectedServerTags, ","), testExe) if len(test.clientEnvVars) > 0 { - var envarg string + var envargBuilder strings.Builder for k, v := range test.clientEnvVars { - envarg += fmt.Sprintf("%s=%s", k, v) + fmt.Fprintf(&envargBuilder, "%s=%s", k, v) } } diff --git 
a/test/new-e2e/tests/windows/common/agent/agent.go b/test/new-e2e/tests/windows/common/agent/agent.go index a46742b613a3e9..ce795f5973a30a 100644 --- a/test/new-e2e/tests/windows/common/agent/agent.go +++ b/test/new-e2e/tests/windows/common/agent/agent.go @@ -7,6 +7,7 @@ package agent import ( + "errors" "fmt" "os" "path/filepath" @@ -69,11 +70,11 @@ func InstallAgent(host *components.RemoteHost, options ...InstallAgentOption) (s } if p.Package == nil { - return "", fmt.Errorf("missing agent package to install") + return "", errors.New("missing agent package to install") } if p.InstallLogFile != "" { // InstallMSI always used a temporary file path - return "", fmt.Errorf("Setting the remote MSI log file path is not supported") + return "", errors.New("Setting the remote MSI log file path is not supported") } if p.LocalInstallLogFile == "" { diff --git a/test/new-e2e/tests/windows/common/agent/package.go b/test/new-e2e/tests/windows/common/agent/package.go index cbd234ef396a62..0382aa8de28201 100644 --- a/test/new-e2e/tests/windows/common/agent/package.go +++ b/test/new-e2e/tests/windows/common/agent/package.go @@ -7,6 +7,7 @@ package agent import ( + "errors" "fmt" "os" "strings" @@ -169,7 +170,7 @@ func GetPipelineMSIURL(pipelineID string, majorVersion string, arch string, flav // Not all pipelines include the pipeline ID in the artifact name, but if it is there then match against it if strings.Contains(artifact, "pipeline.") && - !strings.Contains(artifact, fmt.Sprintf("pipeline.%s", pipelineID)) { + !strings.Contains(artifact, "pipeline."+pipelineID) { return false } if !strings.Contains(artifact, fmt.Sprintf("-%s.msi", arch)) { @@ -410,10 +411,10 @@ func GetLastStablePackageFromEnv() (*Package, error) { flavor, _ := LookupFlavorFromEnv() ver := os.Getenv("LAST_STABLE_VERSION") if ver == "" { - return nil, fmt.Errorf("LAST_STABLE_VERSION is not set") + return nil, errors.New("LAST_STABLE_VERSION is not set") } // TODO: Append -1, should we update release.json to 
include it? - ver = fmt.Sprintf("%s-1", ver) + ver = ver + "-1" var err error @@ -476,7 +477,7 @@ func GetUpgradeTestPackageFromEnv() (*Package, error) { } // if not in pipeline or provided in env, then fail - return nil, fmt.Errorf("no upgradable package found") + return nil, errors.New("no upgradable package found") } // PackageOption defines a function type for modifying a Package @@ -593,10 +594,10 @@ func WithURLFromPipeline(pipelineID string) PackageOption { func WithURLFromInstallersJSON(jsonURL, version string) PackageOption { return func(p *Package) error { if p.Product == "" { - return fmt.Errorf("product must be set before calling WithURLFromInstallersJSON") + return errors.New("product must be set before calling WithURLFromInstallersJSON") } if p.Arch == "" { - return fmt.Errorf("arch must be set before calling WithURLFromInstallersJSON") + return errors.New("arch must be set before calling WithURLFromInstallersJSON") } url, err := installers.GetProductURL(jsonURL, p.Product, version, p.Arch) if err != nil { @@ -636,27 +637,27 @@ func WithURLFromInstallersJSON(jsonURL, version string) PackageOption { // export CURRENT_AGENT_MSI_URL="https://s3.amazonaws.com/dd-agent-mstesting/builds/beta/ddagent-cli-7.64.0-rc.9.msi" func WithDevEnvOverrides(devenvPrefix string) PackageOption { return func(p *Package) error { - if flavor, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_FLAVOR", devenvPrefix)); ok { + if flavor, ok := os.LookupEnv(devenvPrefix + "_MSI_FLAVOR"); ok { if err := WithFlavor(flavor)(p); err != nil { return err } } - if product, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_PRODUCT", devenvPrefix)); ok { + if product, ok := os.LookupEnv(devenvPrefix + "_MSI_PRODUCT"); ok { if err := WithProduct(product)(p); err != nil { return err } } - if arch, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_ARCH", devenvPrefix)); ok { + if arch, ok := os.LookupEnv(devenvPrefix + "_MSI_ARCH"); ok { if err := WithArch(arch)(p); err != nil { return err } } - if channel, ok := 
os.LookupEnv(fmt.Sprintf("%s_MSI_CHANNEL", devenvPrefix)); ok { + if channel, ok := os.LookupEnv(devenvPrefix + "_MSI_CHANNEL"); ok { if err := WithChannel(channel)(p); err != nil { return err } } - if version, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_VERSION", devenvPrefix)); ok { + if version, ok := os.LookupEnv(devenvPrefix + "_MSI_VERSION"); ok { if p.Channel == "" { channel := stableChannel // if channel is not provided, check if we can infer it from the version, @@ -672,19 +673,19 @@ func WithDevEnvOverrides(devenvPrefix string) PackageOption { if err != nil { return err } - if customJSONURL, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_JSON_URL", devenvPrefix)); ok { + if customJSONURL, ok := os.LookupEnv(devenvPrefix + "_MSI_JSON_URL"); ok { jsonURL = customJSONURL } if err := WithURLFromInstallersJSON(jsonURL, version)(p); err != nil { return err } } - if url, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_URL", devenvPrefix)); ok { + if url, ok := os.LookupEnv(devenvPrefix + "_MSI_URL"); ok { if err := WithURL(url)(p); err != nil { return err } } - if pipelineID, ok := os.LookupEnv(fmt.Sprintf("%s_MSI_PIPELINE", devenvPrefix)); ok { + if pipelineID, ok := os.LookupEnv(devenvPrefix + "_MSI_PIPELINE"); ok { if err := WithURLFromPipeline(pipelineID)(p); err != nil { return err } diff --git a/test/new-e2e/tests/windows/common/crashdump.go b/test/new-e2e/tests/windows/common/crashdump.go index 055487cfcd1941..c5f67ff04a0c8e 100644 --- a/test/new-e2e/tests/windows/common/crashdump.go +++ b/test/new-e2e/tests/windows/common/crashdump.go @@ -201,20 +201,21 @@ func DownloadSystemCrashDump(host *components.RemoteHost, systemCrashDumpFile st // EnableDriverVerifier enables standard verifier checks on the specified kernel drivers. Requires a reboot. 
func EnableDriverVerifier(host *components.RemoteHost, kernelDrivers []string) (string, error) { - var driverList string + var driverListBuilder strings.Builder for _, driverName := range kernelDrivers { if !strings.HasSuffix(driverName, ".sys") { - driverList += fmt.Sprintf("%s.sys ", driverName) + driverListBuilder.WriteString(driverName + ".sys ") } else { - driverList += fmt.Sprintf("%s ", driverName) + driverListBuilder.WriteString(driverName + " ") } } + driverList := driverListBuilder.String() fmt.Println("Enabling driver verifier for: ", driverList) // Driver verifier returns an error code of 2. - out, err := host.Execute(fmt.Sprintf("verifier /standard /driver %s", driverList)) + out, err := host.Execute("verifier /standard /driver " + driverList) out = strings.TrimSpace(out) return out, err @@ -256,7 +257,7 @@ func waitForRebootFunc(host *components.RemoteHost, b backoff.BackOff, rebootFun bootTime := strings.TrimSpace(out) fmt.Println("current boot time:", bootTime) if bootTime == lastBootTime { - return fmt.Errorf("boot time has not changed") + return errors.New("boot time has not changed") } return nil }, b) diff --git a/test/new-e2e/tests/windows/common/defender.go b/test/new-e2e/tests/windows/common/defender.go index 46f93209372a7c..9be039f4fd93cd 100644 --- a/test/new-e2e/tests/windows/common/defender.go +++ b/test/new-e2e/tests/windows/common/defender.go @@ -6,10 +6,12 @@ package common import ( + "errors" "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/powershell" "strings" + "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/powershell" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" ) @@ -27,7 +29,7 @@ func DisableDefender(host *components.RemoteHost) error { return err } if protected { - return fmt.Errorf("Windows Defender is tamper protected, unable to modify settings") + return errors.New("Windows Defender is tamper protected, unable to modify settings") } _, err = 
powershell.PsHost().DisableWindowsDefender().Execute(host) diff --git a/test/new-e2e/tests/windows/common/filesystem_snapshot.go b/test/new-e2e/tests/windows/common/filesystem_snapshot.go index ce239668f62855..2cd9808a0d44fa 100644 --- a/test/new-e2e/tests/windows/common/filesystem_snapshot.go +++ b/test/new-e2e/tests/windows/common/filesystem_snapshot.go @@ -73,10 +73,11 @@ func NewFileSystemSnapshot(host *components.RemoteHost, pathsToIgnore []string) } // quote each path and join with commas - pattern := "" + var patternBuilder strings.Builder for _, ignorePath := range pathsToIgnore { - pattern += fmt.Sprintf(`'%s',`, ignorePath) + fmt.Fprintf(&patternBuilder, `'%s',`, ignorePath) } + pattern := patternBuilder.String() // PowerShell list syntax pattern = fmt.Sprintf(`@(%s)`, strings.Trim(pattern, ",")) diff --git a/test/new-e2e/tests/windows/common/product.go b/test/new-e2e/tests/windows/common/product.go index ba22fbdd83dc08..1fb32d6c91ccf5 100644 --- a/test/new-e2e/tests/windows/common/product.go +++ b/test/new-e2e/tests/windows/common/product.go @@ -6,6 +6,7 @@ package common import ( + "errors" "fmt" "strings" @@ -45,7 +46,7 @@ func GetProductVersionByName(host *components.RemoteHost, name string) (string, } val = strings.TrimSpace(val) if val == "" { - return "", fmt.Errorf("display version not found") + return "", errors.New("display version not found") } return val, nil } diff --git a/test/new-e2e/tests/windows/common/proxy.go b/test/new-e2e/tests/windows/common/proxy.go index 2f2aad0d60956f..1970e333e2a340 100644 --- a/test/new-e2e/tests/windows/common/proxy.go +++ b/test/new-e2e/tests/windows/common/proxy.go @@ -6,6 +6,7 @@ package common import ( + "errors" "fmt" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -65,7 +66,7 @@ func BlockAllOutboundExceptProxy(host *components.RemoteHost, proxyIP string, po // Ensure outbound is blocked (a generic external call should fail) _, err = host.Execute(`curl.exe https://google.com`) if err == 
nil { - return fmt.Errorf("outbound is not blocked") + return errors.New("outbound is not blocked") } return nil @@ -85,7 +86,7 @@ func ResetOutboundPolicyAndRemoveProxyRules(host *components.RemoteHost) error { // Ensure outbound is allowed _, err = host.Execute(`curl.exe https://google.com`) if err != nil { - return fmt.Errorf("outbound is not allowed") + return errors.New("outbound is not allowed") } return nil diff --git a/test/new-e2e/tests/windows/common/registry.go b/test/new-e2e/tests/windows/common/registry.go index 7d5075db0bbdbc..d36d7d8967905a 100644 --- a/test/new-e2e/tests/windows/common/registry.go +++ b/test/new-e2e/tests/windows/common/registry.go @@ -7,6 +7,7 @@ package common import ( "fmt" + "strconv" "strings" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -41,7 +42,7 @@ func DeleteRegistryKey(host *components.RemoteHost, path string) error { // SetRegistryDWORDValue sets, creating if necessary, a DWORD value at the specified path func SetRegistryDWORDValue(host *components.RemoteHost, path string, name string, value int) error { - return SetTypedRegistryValue(host, path, name, fmt.Sprintf("%d", value), "DWORD") + return SetTypedRegistryValue(host, path, name, strconv.Itoa(value), "DWORD") } // SetTypedRegistryValue sets, creating if necessary, the value at the specified path with the specified type @@ -67,7 +68,7 @@ func SetRegistryMultiString(host *components.RemoteHost, path string, name strin // SetNewItemDWORDProperty sets a DWORD value at the specified path func SetNewItemDWORDProperty(host *components.RemoteHost, path string, name string, value int) error { - return SetNewItemProperty(host, path, name, fmt.Sprintf("%d", value), "DWORD") + return SetNewItemProperty(host, path, name, strconv.Itoa(value), "DWORD") } // SetNewItemProperty sets a new item property on the remote host diff --git a/test/new-e2e/tests/windows/common/service.go b/test/new-e2e/tests/windows/common/service.go index 37290b45c57669..76610c4373ded7 
100644 --- a/test/new-e2e/tests/windows/common/service.go +++ b/test/new-e2e/tests/windows/common/service.go @@ -7,6 +7,7 @@ package common import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -78,7 +79,7 @@ func (s *ServiceConfig) UnmarshalJSON(b []byte) error { // FetchUserSID fetches the SID for the service user func (s *ServiceConfig) FetchUserSID(host *components.RemoteHost) error { if s.UserName == "" { - return fmt.Errorf("UserName is not set") + return errors.New("UserName is not set") } var err error sid, err := GetServiceAliasSID(s.UserName) @@ -225,7 +226,7 @@ func GetServicePID(host *components.RemoteHost, service string) (int, error) { // GetServiceImagePath returns the image path (command line) of the service func GetServiceImagePath(host *components.RemoteHost, service string) (string, error) { - return GetRegistryValue(host, fmt.Sprintf("HKLM:\\SYSTEM\\CurrentControlSet\\Services\\%s", service), "ImagePath") + return GetRegistryValue(host, "HKLM:\\SYSTEM\\CurrentControlSet\\Services\\"+service, "ImagePath") } // IsUserModeServiceType returns true if the service is a user mode service diff --git a/test/new-e2e/tests/windows/common/user.go b/test/new-e2e/tests/windows/common/user.go index e919019782d828..14afdb85739999 100644 --- a/test/new-e2e/tests/windows/common/user.go +++ b/test/new-e2e/tests/windows/common/user.go @@ -201,7 +201,7 @@ func GetUserRights(host *components.RemoteHost) (map[string][]string, error) { if err != nil { return nil, err } - cmd := fmt.Sprintf(`secedit /export /areas USER_RIGHTS /cfg %s`, outFile) + cmd := "secedit /export /areas USER_RIGHTS /cfg " + outFile _, err = host.Execute(cmd) if err != nil { return nil, err diff --git a/test/new-e2e/tests/windows/fips-test/fips_test.go b/test/new-e2e/tests/windows/fips-test/fips_test.go index 1d125f4c4f4ec6..6c76e30fd0812e 100644 --- a/test/new-e2e/tests/windows/fips-test/fips_test.go +++ b/test/new-e2e/tests/windows/fips-test/fips_test.go @@ -106,9 +106,9 @@ func (s 
*fipsAgentSuite) TestOpenSSLPaths() { // assert openssl winctx registry keys exist // https://github.com/openssl/openssl/blob/master/NOTES-WINDOWS.md#installation-directories expectedOpenSSLPaths := map[string]string{ - "OPENSSLDIR": fmt.Sprintf(`%sembedded3\ssl`, s.installPath), - "ENGINESDIR": fmt.Sprintf(`%sembedded3\lib\engines-3`, s.installPath), - "MODULESDIR": fmt.Sprintf(`%sembedded3\lib\ossl-modules`, s.installPath), + "OPENSSLDIR": s.installPath + "embedded3\\ssl", + "ENGINESDIR": s.installPath + "embedded3\\lib\\engines-3", + "MODULESDIR": s.installPath + "embedded3\\lib\\ossl-modules", } // TODO: How to configure the version of OpenSSL? opensslVersion := "3.5" diff --git a/test/new-e2e/tests/windows/install-test/agent_user_test.go b/test/new-e2e/tests/windows/install-test/agent_user_test.go index dc58e05ea776e8..afe53d6b26a3e9 100644 --- a/test/new-e2e/tests/windows/install-test/agent_user_test.go +++ b/test/new-e2e/tests/windows/install-test/agent_user_test.go @@ -6,14 +6,13 @@ package installtest import ( - "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" - "github.com/stretchr/testify/suite" "testing" + + "github.com/stretchr/testify/suite" ) type agentUserTestCase interface { @@ -130,7 +129,7 @@ func TestAgentUser(t *testing.T) { staticAgentUserTestCase{name: "hostname_user"}, func(tc *agentUserTestCaseWithHostInfo, hostInfo *windowsCommon.HostInfo) { h := windowsCommon.NameToNetBIOSName(hostInfo.Hostname) - tc.username = fmt.Sprintf("%s\\testuser", h) + tc.username = h + "\\testuser" tc.expectedDomain = h tc.expectedUser = "testuser" }}, diff --git a/test/new-e2e/tests/windows/install-test/base.go b/test/new-e2e/tests/windows/install-test/base.go index ba4f68158f05c4..a911b7eba67aaf 100644 --- a/test/new-e2e/tests/windows/install-test/base.go +++ 
b/test/new-e2e/tests/windows/install-test/base.go @@ -7,7 +7,6 @@ package installtest import ( - "fmt" "os" "path/filepath" "strings" @@ -71,7 +70,7 @@ func (s *baseAgentMSISuite) AfterTest(suiteName, testName string) { for _, logName := range []string{"System", "Application"} { // collect the full event log as an evtx file s.T().Logf("Exporting %s event log", logName) - outputPath := filepath.Join(s.SessionOutputDir(), fmt.Sprintf("%s.evtx", logName)) + outputPath := filepath.Join(s.SessionOutputDir(), logName+".evtx") err := windowsCommon.ExportEventLog(vm, logName, outputPath) s.Assert().NoError(err, "should export %s event log", logName) // Log errors and warnings to the screen for easy access @@ -106,7 +105,7 @@ func (s *baseAgentMSISuite) installAgentPackage(vm *components.RemoteHost, agent windowsAgent.WithValidAPIKey(), } installOpts = append(installOpts, installOptions...) - if !s.Run(fmt.Sprintf("install %s", agentPackage.AgentVersion()), func() { + if !s.Run("install "+agentPackage.AgentVersion(), func() { remoteMSIPath, err = s.InstallAgent(vm, installOpts...) s.Require().NoError(err, "should install agent %s", agentPackage.AgentVersion()) }) { @@ -281,11 +280,11 @@ func Run[Env any](t *testing.T, s e2e.Suite[Env]) { // if running locally and not in dev mode, run tests in parallel t.Parallel() // use a UUID to generate a unique name for the stack - opts = append(opts, e2e.WithStackName(fmt.Sprintf("windows-msi-test-%s", uuid.NewString()))) + opts = append(opts, e2e.WithStackName("windows-msi-test-"+uuid.NewString())) } // Include the agent major version in the test name so junit reports will differentiate the tests - t.Run(fmt.Sprintf("Agent v%s", majorVersion), func(t *testing.T) { + t.Run("Agent v"+majorVersion, func(t *testing.T) { e2e.Run(t, s, opts...) 
}) } diff --git a/test/new-e2e/tests/windows/install-test/install_test.go b/test/new-e2e/tests/windows/install-test/install_test.go index 58228a7e70ab7e..640e844121e5db 100644 --- a/test/new-e2e/tests/windows/install-test/install_test.go +++ b/test/new-e2e/tests/windows/install-test/install_test.go @@ -6,8 +6,8 @@ package installtest import ( - "fmt" "path/filepath" + "strconv" "strings" "time" @@ -351,7 +351,7 @@ func (s *testInstallOptsSuite) TestInstallOpts() { windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), windowsAgent.WithTags("k1:v1,k2:v2"), windowsAgent.WithHostname("win-installopts"), - windowsAgent.WithCmdPort(fmt.Sprintf("%d", cmdPort)), + windowsAgent.WithCmdPort(strconv.Itoa(cmdPort)), windowsAgent.WithProxyHost("proxy.foo.com"), windowsAgent.WithProxyPort("1234"), windowsAgent.WithProxyUser("puser"), @@ -475,7 +475,7 @@ func (s *testInstallFailSuite) TestInstallFail() { vm := s.Env().RemoteHost // run installer with failure flag - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithValidAPIKey(), diff --git a/test/new-e2e/tests/windows/install-test/installtester.go b/test/new-e2e/tests/windows/install-test/installtester.go index bb28337239b52c..4bdee5b94eae41 100644 --- a/test/new-e2e/tests/windows/install-test/installtester.go +++ b/test/new-e2e/tests/windows/install-test/installtester.go @@ -6,7 +6,7 @@ package installtest import ( - "fmt" + "errors" "io/fs" "path/filepath" "slices" @@ -72,7 +72,7 @@ func NewTester(context utilscommon.Context, host *components.RemoteHost, opts .. 
} if t.expectedAgentVersion == "" { - return nil, fmt.Errorf("expectedAgentVersion is required") + return nil, errors.New("expectedAgentVersion is required") } // Ensure the expected version is well formed @@ -619,7 +619,7 @@ func (t *Tester) testInstalledFilePermissions(tt *testing.T, ddAgentUserIdentity // TestInstallExpectations tests the current agent installation meets the expectations provided to the Tester func (t *Tester) TestInstallExpectations(tt *testing.T) bool { - return tt.Run(fmt.Sprintf("test %s", t.agentPackage.AgentVersion()), func(tt *testing.T) { + return tt.Run("test "+t.agentPackage.AgentVersion(), func(tt *testing.T) { if !tt.Run("running expected agent version", func(tt *testing.T) { installedVersion, err := t.InstallTestClient.GetAgentVersion() require.NoError(tt, err, "should get agent version") diff --git a/test/new-e2e/tests/windows/install-test/npm_test.go b/test/new-e2e/tests/windows/install-test/npm_test.go index b674713aafa659..7a06552712ed2c 100644 --- a/test/new-e2e/tests/windows/install-test/npm_test.go +++ b/test/new-e2e/tests/windows/install-test/npm_test.go @@ -6,7 +6,6 @@ package installtest import ( - "fmt" "path/filepath" "time" @@ -216,14 +215,14 @@ func (s *testNPMInstallSuite) upgradeAgent(host *components.RemoteHost, agentPac windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), } installOpts = append(installOpts, options...) - if !s.Run(fmt.Sprintf("upgrade to %s", agentPackage.AgentVersion()), func() { + if !s.Run("upgrade to "+agentPackage.AgentVersion(), func() { _, err := s.InstallAgent(host, installOpts...) 
s.Require().NoError(err, "should upgrade to agent %s", agentPackage.AgentVersion()) }) { s.T().FailNow() } - if !s.Run(fmt.Sprintf("test %s", agentPackage.AgentVersion()), func() { + if !s.Run("test "+agentPackage.AgentVersion(), func() { client := s.NewTestClientForHost(host) RequireAgentVersionRunningWithNoErrors(s.T(), client, agentPackage.AgentVersion()) }) { diff --git a/test/new-e2e/tests/windows/install-test/persisting_integrations_test.go b/test/new-e2e/tests/windows/install-test/persisting_integrations_test.go index 7bfd3ed4c2e1d1..f0f64d54c54942 100644 --- a/test/new-e2e/tests/windows/install-test/persisting_integrations_test.go +++ b/test/new-e2e/tests/windows/install-test/persisting_integrations_test.go @@ -45,7 +45,7 @@ func (s *testPersistingIntegrationsSuite) TestPersistingIntegrations() { vm := s.Env().RemoteHost // install current version - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), @@ -68,7 +68,7 @@ func (s *testPersistingIntegrationsSuite) TestPersistingIntegrations() { s.Require().NoError(err, "should install pip package") // upgrade to test agent - if !s.Run(fmt.Sprintf("upgrade to %s", s.upgradeAgentPackge.AgentVersion()), func() { + if !s.Run("upgrade to "+s.upgradeAgentPackge.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.upgradeAgentPackge), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), @@ -126,7 +126,7 @@ func (s *testDisablePersistingIntegrationsSuite) TestDisablePersistingIntegratio vm := s.Env().RemoteHost // install current version - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, 
windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), @@ -151,7 +151,7 @@ func (s *testDisablePersistingIntegrationsSuite) TestDisablePersistingIntegratio // upgrade to test agent // with the integrations persistence flag disabled - if !s.Run(fmt.Sprintf("upgrade to %s", s.upgradeAgentPackge.AgentVersion()), func() { + if !s.Run("upgrade to "+s.upgradeAgentPackge.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.upgradeAgentPackge), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), @@ -254,7 +254,7 @@ func (s *testIntegrationFolderPermissions) TestIntegrationFolderPermissions() { vm := s.Env().RemoteHost // install current version - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), @@ -350,7 +350,7 @@ func (s *testIntegrationRollback) TestIntegrationRollback() { vm := s.Env().RemoteHost // install current version - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), @@ -417,7 +417,7 @@ func (s *testIntegrationRollback) TestIntegrationRollback() { s.checkIntegrationInstall(vm, thirdPartyIntegration) // upgrade again without failure - if !s.Run(fmt.Sprintf("upgrade to %s", s.upgradeAgentPackge.AgentVersion()), func() { + if !s.Run("upgrade to "+s.upgradeAgentPackge.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.upgradeAgentPackge), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), 
@@ -455,7 +455,7 @@ func (s *testPersistingIntegrationsDuringUninstall) TestPersistingIntegrationsDu vm := s.Env().RemoteHost // install current version - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), @@ -483,7 +483,7 @@ func (s *testPersistingIntegrationsDuringUninstall) TestPersistingIntegrationsDu ) // upgrade to test agent - if !s.Run(fmt.Sprintf("upgrade to %s", s.upgradeAgentPackge.AgentVersion()), func() { + if !s.Run("upgrade to "+s.upgradeAgentPackge.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.upgradeAgentPackge), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), @@ -560,7 +560,7 @@ func (s *baseAgentMSISuite) checkPipPackageInstalled(vm *components.RemoteHost, s.Require().NoError(err, "should show pip package") // check to make sure it is installed - packageCheck := fmt.Sprintf("Name: %s", packageToCheck) + packageCheck := "Name: " + packageToCheck assert.True(s.T(), strings.Contains(out, packageCheck), "pip package should be installed") } diff --git a/test/new-e2e/tests/windows/install-test/service-test/tester.go b/test/new-e2e/tests/windows/install-test/service-test/tester.go index 6a8cfdb777bb71..efa77db8e026fe 100644 --- a/test/new-e2e/tests/windows/install-test/service-test/tester.go +++ b/test/new-e2e/tests/windows/install-test/service-test/tester.go @@ -132,17 +132,17 @@ func (t *Tester) ExpectedServiceConfig() (windowsCommon.ServiceConfigMap, error) m["ddprocmon"].DisplayName = "Datadog Process Monitor" // ImagePath - exePath := quotePathIfContainsSpaces(fmt.Sprintf(`%s\bin\agent.exe`, t.expectedInstallPath)) + exePath := quotePathIfContainsSpaces(t.expectedInstallPath + "\\bin\\agent.exe") m["datadogagent"].ImagePath = exePath // 
TODO: double slash is intentional, must fix the path in the installer - exePath = quotePathIfContainsSpaces(fmt.Sprintf(`%s\bin\agent\trace-agent.exe`, t.expectedInstallPath)) + exePath = quotePathIfContainsSpaces(t.expectedInstallPath + "\\bin\\agent\\trace-agent.exe") m["datadog-trace-agent"].ImagePath = fmt.Sprintf(`%s --config="%s\\datadog.yaml"`, exePath, t.expectedConfigRoot) // TODO: double slash is intentional, must fix the path in the installer - exePath = quotePathIfContainsSpaces(fmt.Sprintf(`%s\bin\agent\process-agent.exe`, t.expectedInstallPath)) + exePath = quotePathIfContainsSpaces(t.expectedInstallPath + "\\bin\\agent\\process-agent.exe") m["datadog-process-agent"].ImagePath = fmt.Sprintf(`%s --cfgpath="%s\\datadog.yaml"`, exePath, t.expectedConfigRoot) - exePath = quotePathIfContainsSpaces(fmt.Sprintf(`%s\bin\agent\security-agent.exe`, t.expectedInstallPath)) + exePath = quotePathIfContainsSpaces(t.expectedInstallPath + "\\bin\\agent\\security-agent.exe") m["datadog-security-agent"].ImagePath = exePath - exePath = quotePathIfContainsSpaces(fmt.Sprintf(`%s\bin\agent\system-probe.exe`, t.expectedInstallPath)) + exePath = quotePathIfContainsSpaces(t.expectedInstallPath + "\\bin\\agent\\system-probe.exe") m["datadog-system-probe"].ImagePath = exePath // drivers use the kernel path syntax and aren't quoted since they are file paths rather than command lines m["ddnpm"].ImagePath = fmt.Sprintf(`\??\%s\bin\agent\driver\ddnpm.sys`, t.expectedInstallPath) diff --git a/test/new-e2e/tests/windows/install-test/upgrade_test.go b/test/new-e2e/tests/windows/install-test/upgrade_test.go index f6cdd349f029e3..3d056f1ee9f343 100644 --- a/test/new-e2e/tests/windows/install-test/upgrade_test.go +++ b/test/new-e2e/tests/windows/install-test/upgrade_test.go @@ -54,7 +54,7 @@ func (s *testUpgradeSuite) TestUpgrade() { } // upgrade to the new version - if !s.Run(fmt.Sprintf("upgrade to %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("upgrade to 
"+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), @@ -91,7 +91,7 @@ func (s *testUpgradeFromLatestSuite) TestUpgradeFromLatest() { vm := s.Env().RemoteHost // install current version - if !s.Run(fmt.Sprintf("install %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("install "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "install.log")), @@ -106,7 +106,7 @@ func (s *testUpgradeFromLatestSuite) TestUpgradeFromLatest() { s.Require().NoError(err, "should get product version") // upgrade to test agent - if !s.Run(fmt.Sprintf("upgrade to %s", s.upgradeAgentPackge.AgentVersion()), func() { + if !s.Run("upgrade to "+s.upgradeAgentPackge.AgentVersion(), func() { _, err := s.InstallAgent(vm, windowsAgent.WithPackage(s.upgradeAgentPackge), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), @@ -204,7 +204,7 @@ func (s *testUpgradeRollbackWithoutCWSSuite) SetupSuite() { var err error majorVersion := strings.Split(s.AgentPackage.Version, ".")[0] s.previousAgentPackage = &windowsAgent.Package{ - Version: fmt.Sprintf("%s.51.0-1", majorVersion), + Version: majorVersion + ".51.0-1", Arch: "x86_64", } s.previousAgentPackage.URL, err = windowsAgent.GetStableMSIURL(s.previousAgentPackage.Version, s.previousAgentPackage.Arch, "") @@ -277,7 +277,7 @@ func (s *testUpgradeChangeUserSuite) TestUpgradeChangeUser() { s.installAndTestLastStable(host) // upgrade to the new version - if !s.Run(fmt.Sprintf("upgrade to %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("upgrade to "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(host, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), 
"upgrade.log")), @@ -364,7 +364,7 @@ func (s *testUpgradeFromV5Suite) TestUpgrade5() { s.installAgent5() // upgrade to the new version - if !s.Run(fmt.Sprintf("upgrade to %s", s.AgentPackage.AgentVersion()), func() { + if !s.Run("upgrade to "+s.AgentPackage.AgentVersion(), func() { _, err := s.InstallAgent(host, windowsAgent.WithPackage(s.AgentPackage), windowsAgent.WithInstallLogFile(filepath.Join(s.SessionOutputDir(), "upgrade.log")), diff --git a/test/new-e2e/tests/windows/service-test/startstop_test.go b/test/new-e2e/tests/windows/service-test/startstop_test.go index e1f3cbf29d3725..d2874466368f16 100644 --- a/test/new-e2e/tests/windows/service-test/startstop_test.go +++ b/test/new-e2e/tests/windows/service-test/startstop_test.go @@ -409,7 +409,7 @@ func (s *agentServiceDisabledSuite) TestStartingDisabledService() { if !slices.Contains(kernel, service) { // try and start it and verify that it does correctly outputs to event log err := windowsCommon.StartService(s.Env().RemoteHost, service) - s.Require().NoError(err, fmt.Sprintf("should start %s", service)) + s.Require().NoError(err, "should start "+service) // verify that service returns to stopped state s.assertServiceState("Stopped", service, nil) @@ -645,7 +645,7 @@ func (s *baseStartStopSuite) AfterTest(suiteName, testName string) { for _, logName := range []string{"System", "Application"} { // collect the full event log as an evtx file s.T().Logf("Exporting %s event log", logName) - outputPath := filepath.Join(s.SessionOutputDir(), fmt.Sprintf("%s.evtx", logName)) + outputPath := filepath.Join(s.SessionOutputDir(), logName+".evtx") err := windowsCommon.ExportEventLog(host, logName, outputPath) s.Assert().NoError(err, "should export %s event log", logName) // Log errors and warnings to the screen for easy access