diff --git a/.github/workflows/loadtest-hourly.yaml b/.github/workflows/loadtest-hourly.yaml deleted file mode 100644 index 1e0e9638ce..0000000000 --- a/.github/workflows/loadtest-hourly.yaml +++ /dev/null @@ -1,89 +0,0 @@ -name: Prod Load Test hourly probe - -on: - schedule: - - cron: "15 * * * *" - workflow_dispatch: - -jobs: - load_test: - strategy: - fail-fast: false - matrix: - cluster: - - label: stone-prd-rh01 - repo: https://github.com/rhtap-perf-test/nodejs-devfile-sample1 - member_cluster_secret: MEMBER_CLUSTER_STONE_PRD_RH01 - ocp_prometheus_token_secret: OCP_PROMETHEUS_TOKEN_STONE_PRD_RH01 - users_secret: USERS_STONE_PRD_RH01 - should_fail: false - - label: stone-stg-rh01 - repo: https://github.com/rhtap-perf-test/nodejs-devfile-sample2 - member_cluster_secret: MEMBER_CLUSTER_STONE_STG_RH01 - ocp_prometheus_token_secret: OCP_PROMETHEUS_TOKEN_STONE_STG_RH01 - users_secret: USERS_STONE_STG_RH01 - should_fail: false - - label: kflux-prd-rh02 - repo: https://github.com/rhtap-perf-test/nodejs-devfile-sample3 - member_cluster_secret: MEMBER_CLUSTER_KFLUX_PRD_RH02 - ocp_prometheus_token_secret: OCP_PROMETHEUS_TOKEN_KFLUX_PRD_RH02 - users_secret: USERS_KFLUX_PRD_RH02 - should_fail: false - - runs-on: ubuntu-latest - timeout-minutes: 120 - # continue even if the job fails - continue-on-error: true - - # Make sure this action does not get scheduled by cron on e2e-tests forks - if: ${{ github.repository_owner == 'konflux-ci' || github.event_name != 'schedule' }} - - env: - ARTIFACT_DIR: ${{ github.workspace }}/tests/load-test/artifacts/ - - steps: - - - name: Checkout code - uses: actions/checkout@v3 - - - name: Set up jq - run: | - sudo apt-get update - sudo apt-get install -y jq - - - name: Prepare list of users - working-directory: ./tests/load-tests - env: - USERS: ${{ secrets[matrix.cluster.users_secret] }} - run: echo "$USERS" > users.json - - - name: Run Load Test - working-directory: ./tests/load-tests - env: - SCENARIO: COMPONENT_REPO=${{ 
matrix.cluster.repo }} CONCURRENCY=1 COMPONENT_DOCKERFILE_PATH=Dockerfile MY_GITHUB_ORG=rhtap-perf-test - GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - MY_GITHUB_ORG: "rhtap-perf-test" - run: | - export $SCENARIO - ./run-stage.sh - - - name: Collect results - working-directory: ./tests/load-tests - env: - MEMBER_CLUSTER: ${{ secrets[matrix.cluster.member_cluster_secret] }} - OCP_PROMETHEUS_TOKEN: ${{ secrets[matrix.cluster.ocp_prometheus_token_secret] }} - run: | - export $SCENARIO - export MEMBER_CLUSTER - export OCP_PROMETHEUS_TOKEN - ./ci-scripts/stage/collect-results.sh ${CONCURRENCY:-1} ${PWD} - - - name: List files in the artifact directory - run: ls -la ${{ env.ARTIFACT_DIR }} - - - name: Archive artifacts - uses: actions/upload-artifact@v4 - with: - name: rhtap-load-test-${{ matrix.cluster.label }} - path: ${{ env.ARTIFACT_DIR }} - include-hidden-files: true diff --git a/go.mod b/go.mod index d1ff501b5d..424dd5bac5 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/stretchr/testify v1.10.0 github.com/tektoncd/cli v0.33.0 github.com/tektoncd/pipeline v0.68.0 - github.com/xanzy/go-gitlab v0.104.1 + github.com/xanzy/go-gitlab v0.110.0 golang.org/x/crypto v0.36.0 golang.org/x/oauth2 v0.25.0 golang.org/x/tools v0.28.0 diff --git a/go.sum b/go.sum index 234bd8f542..8cbb4dffa0 100644 --- a/go.sum +++ b/go.sum @@ -1939,6 +1939,8 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xanzy/go-gitlab v0.104.1 h1:g/liXIPJH0jsTwVuzTAUMiKdTf6Qup3u2XZq5Rp90Wc= github.com/xanzy/go-gitlab v0.104.1/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/xanzy/go-gitlab v0.110.0 h1:hsFIFp01v/0D0sdUXoZfRk6CROzZbHQplk6NzKSFKhc= +github.com/xanzy/go-gitlab v0.110.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= 
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= diff --git a/pkg/clients/common/service_account.go b/pkg/clients/common/service_account.go index 3d4fb4d858..095311af98 100644 --- a/pkg/clients/common/service_account.go +++ b/pkg/clients/common/service_account.go @@ -5,6 +5,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + . "github.com/onsi/ginkgo/v2" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -17,6 +18,7 @@ func (s *SuiteController) ServiceAccountPresent(saName, namespace string) wait.C return func() (bool, error) { _, err := s.GetServiceAccount(saName, namespace) if err != nil { + GinkgoWriter.Printf("failed to get service account %s in namespace %s: %+v\n", saName, namespace, err) return false, nil } return true, nil diff --git a/pkg/clients/github/repositories.go b/pkg/clients/github/repositories.go index 9c650eb294..d7a0c93ddc 100644 --- a/pkg/clients/github/repositories.go +++ b/pkg/clients/github/repositories.go @@ -66,11 +66,15 @@ func (g *Github) CreateFile(repository, pathToFile, fileContent, branchName stri } func (g *Github) GetFile(repository, pathToFile, branchName string) (*github.RepositoryContent, error) { + return g.GetFileWithOrg(g.organization, repository, pathToFile, branchName) +} + +func (g *Github) GetFileWithOrg(org, repository, pathToFile, branchName string) (*github.RepositoryContent, error) { opts := &github.RepositoryContentGetOptions{} if branchName != "" { opts.Ref = fmt.Sprintf(HEADS, branchName) } - file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.organization, repository, pathToFile, opts) + file, _, _, err := g.client.Repositories.GetContents(context.Background(), org, repository, pathToFile, opts) if err != nil { return nil, fmt.Errorf("error when listing file contents: %v", err) } @@ -158,20 
+162,21 @@ func (g *Github) DeleteRepositoryIfExists(name string) error { _, resp, err := g.client.Repositories.Get(ctx, g.organization, name) if err != nil { - if resp.StatusCode != 404 { - return fmt.Errorf("Error checking repository %s/%s: %v\n", g.organization, name, err) - } - } else { - _, deleteErr := g.client.Repositories.Delete(ctx, g.organization, name) - if deleteErr != nil { - return fmt.Errorf("Error deleting repository %s/%s: %v\n", g.organization, name, deleteErr) + if resp != nil && resp.StatusCode == 404 { + return nil } + return fmt.Errorf("Error checking repository %s/%s: %v", g.organization, name, err) + } + + _, deleteErr := g.client.Repositories.Delete(ctx, g.organization, name) + if deleteErr != nil { + return fmt.Errorf("Error deleting repository %s/%s: %v", g.organization, name, deleteErr) } return nil } -func (g *Github) ForkRepository(sourceName, targetName string) (*github.Repository, error) { +func (g *Github) ForkRepositoryWithOrgs(sourceOrgName, sourceName, targetOrgName, targetName string) (*github.Repository, error) { var fork *github.Repository var resp *github.Response var repo *github.Repository @@ -179,11 +184,11 @@ func (g *Github) ForkRepository(sourceName, targetName string) (*github.Reposito ctx := context.Background() forkOptions := &github.RepositoryCreateForkOptions{ - Organization: g.organization, + Organization: targetOrgName, } err1 := utils.WaitUntilWithInterval(func() (done bool, err error) { - fork, resp, err = g.client.Repositories.CreateFork(ctx, g.organization, sourceName, forkOptions) + fork, resp, err = g.client.Repositories.CreateFork(ctx, sourceOrgName, sourceName, forkOptions) if err != nil { if _, ok := err.(*github.AcceptedError); ok && resp.StatusCode == 202 { // This meens forking is happening asynchronously @@ -200,25 +205,25 @@ func (g *Github) ForkRepository(sourceName, targetName string) (*github.Reposito fmt.Printf("Warning, got 500: %s", resp.Body) return false, nil } - return false, 
fmt.Errorf("Error forking %s/%s: %v", g.organization, sourceName, err) + return false, fmt.Errorf("Error forking %s/%s: %v", sourceOrgName, sourceName, err) } return true, nil - }, time.Second * 10, time.Minute * 30) + }, time.Second * 10, time.Minute * 5) if err1 != nil { - return nil, fmt.Errorf("Failed waiting for fork %s/%s: %v", g.organization, sourceName, err1) + return nil, fmt.Errorf("Failed waiting for fork %s/%s: %v", sourceOrgName, sourceName, err1) } err2 := utils.WaitUntilWithInterval(func() (done bool, err error) { // Using this to detect repo is created and populated with content // https://stackoverflow.com/questions/33666838/determine-if-a-fork-is-ready - _, _, err = g.client.Repositories.ListCommits(ctx, g.organization, fork.GetName(), &github.CommitsListOptions{}) + _, _, err = g.client.Repositories.ListCommits(ctx, targetOrgName, fork.GetName(), &github.CommitsListOptions{}) if err != nil { return false, nil } return true, nil }, time.Second * 10, time.Minute * 10) if err2 != nil { - return nil, fmt.Errorf("Failed waiting for commits %s/%s: %v", g.organization, sourceName, err2) + return nil, fmt.Errorf("Failed waiting for commits %s/%s: %v", targetOrgName, fork.GetName(), err2) } editedRepo := &github.Repository{ @@ -226,20 +231,35 @@ func (g *Github) ForkRepository(sourceName, targetName string) (*github.Reposito } err3 := utils.WaitUntilWithInterval(func() (done bool, err error) { - repo, resp, err = g.client.Repositories.Edit(ctx, g.organization, fork.GetName(), editedRepo) + repo, resp, err = g.client.Repositories.Edit(ctx, targetOrgName, fork.GetName(), editedRepo) if err != nil { if resp.StatusCode == 422 { // This started to happen recently. Docs says 422 is "Validation failed, or the endpoint has been spammed." so we need to be patient. 
// Error we are getting: "422 Validation Failed [{Resource:Repository Field:name Code:custom Message:name a repository operation is already in progress}]" return false, nil } - return false, fmt.Errorf("Error renaming %s/%s to %s: %v\n", g.organization, fork.GetName(), targetName, err) + return false, fmt.Errorf("Error renaming %s/%s to %s: %v", targetOrgName, fork.GetName(), targetName, err) } return true, nil }, time.Second * 10, time.Minute * 10) if err3 != nil { - return nil, fmt.Errorf("Failed waiting for renaming %s/%s: %v", g.organization, targetName, err3) + return nil, fmt.Errorf("Failed waiting for renaming %s/%s: %v", targetOrgName, targetName, err3) } return repo, nil } + +// Fork repository in our organization +func (g *Github) ForkRepository(sourceName, targetName string) (*github.Repository, error) { + return g.ForkRepositoryWithOrgs(g.organization, sourceName, g.organization, targetName) +} + +// Fork repository from our organization to another org +func (g *Github) ForkRepositoryToOrg(sourceName, targetName, targetOrgName string) (*github.Repository, error) { + return g.ForkRepositoryWithOrgs(g.organization, sourceName, targetOrgName, targetName) +} + +// Fork repository from another organization to our org +func (g *Github) ForkRepositoryFromOrg(sourceName, targetName, sourceOrgName string) (*github.Repository, error) { + return g.ForkRepositoryWithOrgs(sourceOrgName, sourceName, g.organization, targetName) +} diff --git a/pkg/clients/gitlab/git.go b/pkg/clients/gitlab/git.go index b9c63b27c1..41963ab571 100644 --- a/pkg/clients/gitlab/git.go +++ b/pkg/clients/gitlab/git.go @@ -9,6 +9,8 @@ import ( . 
"github.com/onsi/gomega" "github.com/xanzy/go-gitlab" + + utils "github.com/konflux-ci/e2e-tests/pkg/utils" ) // CreateBranch creates a new branch in a GitLab project with the given projectID and newBranchName @@ -67,7 +69,7 @@ func (gc *GitlabClient) CreateGitlabNewBranch(projectID, branchName, sha, baseBr // If sha is not provided, get the latest commit from the base branch if sha == "" { - commit, _, err := gc.client.Commits.GetCommit(projectID, baseBranch) + commit, _, err := gc.client.Commits.GetCommit(projectID, baseBranch, &gitlab.GetCommitOptions{}) if err != nil { return fmt.Errorf("failed to get latest commit from base branch: %v", err) } @@ -252,3 +254,136 @@ func (gc *GitlabClient) GetCommitStatusConclusion(statusName, projectID, commitS return matchingStatus.Status } + +// DeleteRepositoryIfExists deletes a GitLab repository if it exists. +// Returns an error if the deletion fails except for project not being found (404). +func (gc *GitlabClient) DeleteRepositoryIfExists(projectID string) error { + getProj, getResp, getErr := gc.client.Projects.GetProject(projectID, nil) + if getErr != nil { + if getResp != nil && getResp.StatusCode == http.StatusNotFound { + return nil + } else { + return fmt.Errorf("Error getting project %s: %v", projectID, getErr) + } + } + if getProj.PathWithNamespace != projectID && strings.Contains(getProj.PathWithNamespace, projectID + "-deleted-") { + // We asked for repo like "jhutar/nodejs-devfile-sample7-ocpp01v1-konflux-perfscale" + // and got "jhutar/nodejs-devfile-sample7-ocpp01v1-konflux-perfscale-deleted-138805" + // and that means repo was moved by being deleted for a first + // time, entering a grace period. 
 + + // Now we need to delete the repository for a second time to limit + // number of repos we keep behind as per request in INC3755661 + err := gc.DeleteRepositoryReally(getProj.PathWithNamespace) + return err + } + + resp, err := gc.client.Projects.DeleteProject(projectID, &gitlab.DeleteProjectOptions{}) + + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil + } + return fmt.Errorf("Error deleting project %s: %w", projectID, err) + } + + if resp.StatusCode != http.StatusAccepted { + return fmt.Errorf("Unexpected status code when deleting project %s: %d", projectID, resp.StatusCode) + } + + err = utils.WaitUntilWithInterval(func() (done bool, err error) { + getProj, getResp, getErr := gc.client.Projects.GetProject(projectID, nil) + + if getErr != nil { + if getResp != nil && getResp.StatusCode == http.StatusNotFound { + return true, nil + } else { + return false, getErr + } + } + + if getProj.PathWithNamespace != projectID && strings.Contains(getProj.PathWithNamespace, projectID + "-deleted-") { + errDel := gc.DeleteRepositoryReally(getProj.PathWithNamespace) + if errDel != nil { + return false, errDel + } + return true, nil + } + + fmt.Printf("Repo %s still exists: %v\n", projectID, getResp) + return false, nil + }, time.Second * 10, time.Minute * 5) + + return err +} + +// GitLab has a concept of two deletes. First one just renames the repo, +// and only second one really deletes it. DeleteRepositoryReally is meant for +// the second deletion. +func (gc *GitlabClient) DeleteRepositoryReally(projectID string) error { + opts := &gitlab.DeleteProjectOptions{ + FullPath: gitlab.Ptr(projectID), + PermanentlyRemove: gitlab.Ptr(true), + } + _, err := gc.client.Projects.DeleteProject(projectID, opts) + if err != nil { + return fmt.Errorf("Error on permanently deleting project %s: %w", projectID, err) + } + return nil +} + +// ForkRepository forks a source GitLab repository to a target repository. 
+// Returns the newly forked repository and an error if the operation fails. +func (gc *GitlabClient) ForkRepository(sourceOrgName, sourceName, targetOrgName, targetName string) (*gitlab.Project, error) { + var forkedProject *gitlab.Project + var resp *gitlab.Response + var err error + + sourceProjectID := sourceOrgName + "/" + sourceName + targetProjectID := targetOrgName + "/" + targetName + + opts := &gitlab.ForkProjectOptions{ + Name: gitlab.Ptr(targetName), + NamespacePath: gitlab.Ptr(targetOrgName), + Path: gitlab.Ptr(targetName), + } + + err = utils.WaitUntilWithInterval(func() (done bool, err error) { + forkedProject, resp, err = gc.client.Projects.ForkProject(sourceProjectID, opts) + if err != nil { + fmt.Printf("Failed to fork %s, trying again: %v\n", sourceProjectID, err) + return false, nil + } + return true, nil + }, time.Second * 10, time.Minute * 5) + if err != nil { + return nil, fmt.Errorf("Error forking project %s to %s: %w", sourceProjectID, targetProjectID, err) + } + + if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusAccepted { + return nil, fmt.Errorf("Unexpected status code when forking project %s: %d", sourceProjectID, resp.StatusCode) + } + + err = utils.WaitUntilWithInterval(func() (done bool, err error) { + var getErr error + + forkedProject, _, getErr = gc.client.Projects.GetProject(forkedProject.ID, nil) + if getErr != nil { + return false, fmt.Errorf("Error getting forked project status for %s (ID: %d): %w", forkedProject.Name, forkedProject.ID, getErr) + } + + if forkedProject.ImportStatus == "finished" { + return true, nil + } else if forkedProject.ImportStatus == "failed" || forkedProject.ImportStatus == "timeout" { + return false, fmt.Errorf("Forking of project %s (ID: %d) failed with import status: %s", forkedProject.Name, forkedProject.ID, forkedProject.ImportStatus) + } + + return false, nil + }, time.Second * 10, time.Minute * 10) + + if err != nil { + return nil, fmt.Errorf("Error waiting for project 
%s (ID: %d) fork to complete: %w", targetProjectID, forkedProject.ID, err) + } + + return forkedProject, nil +} diff --git a/pkg/clients/has/components.go b/pkg/clients/has/components.go index 8e8d9cc91a..7ca6c2c6fe 100644 --- a/pkg/clients/has/components.go +++ b/pkg/clients/has/components.go @@ -303,10 +303,22 @@ func (h *HasController) CreateComponent(componentSpec appservice.ComponentSpec, if err := h.KubeRest().Create(ctx, componentObject); err != nil { return nil, err } + + return componentObject, nil +} + +// Create a component and check image repository gets created. +func (h *HasController) CreateComponentCheckImageRepository(componentSpec appservice.ComponentSpec, namespace string, outputContainerImage string, secret string, applicationName string, skipInitialChecks bool, annotations map[string]string) (*appservice.Component, error) { + componentObject, err := h.CreateComponent(componentSpec, namespace, outputContainerImage, secret, applicationName, skipInitialChecks, annotations) + if err != nil { + return nil, err + } + // Decrease the timeout to 5 mins, when the issue https://issues.redhat.com/browse/STONEBLD-3552 is fixed - if err := utils.WaitUntil(h.CheckImageRepositoryExists(namespace, componentSpec.ComponentName), time.Minute*15); err != nil { + if err := utils.WaitUntilWithInterval(h.CheckImageRepositoryExists(namespace, componentSpec.ComponentName), time.Second*10, time.Minute*15); err != nil { return nil, fmt.Errorf("timed out waiting for image repository to be ready for component %s in namespace %s: %+v", componentSpec.ComponentName, namespace, err) } + return componentObject, nil } @@ -355,10 +367,6 @@ func (h *HasController) ScaleComponentReplicas(component *appservice.Component, // DeleteComponent delete an has component from a given name and namespace func (h *HasController) DeleteComponent(name string, namespace string, reportErrorOnNotFound bool) error { - // temporary logs - start := time.Now() - GinkgoWriter.Printf("Start to delete 
component '%s' at %s\n", name, start.Format(time.RFC3339)) - component := appservice.Component{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -374,19 +382,11 @@ func (h *HasController) DeleteComponent(name string, namespace string, reportErr // RHTAPBUGS-978: temporary timeout to 15min err := utils.WaitUntil(h.ComponentDeleted(&component), 15*time.Minute) - // temporary logs - deletionTime := time.Since(start).Minutes() - GinkgoWriter.Printf("Finish to delete component '%s' at %s. It took '%f' minutes\n", name, time.Now().Format(time.RFC3339), deletionTime) - return err } // DeleteAllComponentsInASpecificNamespace removes all component CRs from a specific namespace. Useful when creating a lot of resources and want to remove all of them func (h *HasController) DeleteAllComponentsInASpecificNamespace(namespace string, timeout time.Duration) error { - // temporary logs - start := time.Now() - GinkgoWriter.Printf("Start to delete all components in namespace '%s' at %s\n", namespace, start.String()) - if err := h.KubeRest().DeleteAllOf(context.Background(), &appservice.Component{}, rclient.InNamespace(namespace)); err != nil { return fmt.Errorf("error deleting components from the namespace %s: %+v", namespace, err) } @@ -400,10 +400,6 @@ func (h *HasController) DeleteAllComponentsInASpecificNamespace(namespace string return len(componentList.Items) == 0, nil }, timeout) - // temporary logs - deletionTime := time.Since(start).Minutes() - GinkgoWriter.Printf("Finish to delete all components in namespace '%s' at %s. It took '%f' minutes\n", namespace, time.Now().Format(time.RFC3339), deletionTime) - return err } @@ -557,6 +553,24 @@ func (h *HasController) CheckImageRepositoryExists(namespace, componentName stri } } +// DeleteAllImageRepositoriesInASpecificNamespace removes all image repository CRs from a specific namespace. 
Useful when cleaning up a namespace and component cleanup did not clean its image repository +func (h *HasController) DeleteAllImageRepositoriesInASpecificNamespace(namespace string, timeout time.Duration) error { + if err := h.KubeRest().DeleteAllOf(context.Background(), &imagecontroller.ImageRepository{}, rclient.InNamespace(namespace)); err != nil { + return fmt.Errorf("error deleting image repositories from the namespace %s: %+v", namespace, err) + } + + imageRepositoryList := &imagecontroller.ImageRepositoryList{} + + err := utils.WaitUntil(func() (done bool, err error) { + if err := h.KubeRest().List(context.Background(), imageRepositoryList, &rclient.ListOptions{Namespace: namespace}); err != nil { + return false, nil + } + return len(imageRepositoryList.Items) == 0, nil + }, timeout) + + return err +} + // Gets value of a specified annotation in a component func (h *HasController) GetComponentAnnotation(componentName, annotationKey, namespace string) (string, error) { component, err := h.GetComponent(componentName, namespace) diff --git a/pkg/clients/kubernetes/client.go b/pkg/clients/kubernetes/client.go index c8b556a31d..a41cd044c8 100644 --- a/pkg/clients/kubernetes/client.go +++ b/pkg/clients/kubernetes/client.go @@ -111,7 +111,7 @@ func (c *CustomClient) DynamicClient() dynamic.Interface { // Creates Kubernetes clients: // 1. Will create a kubernetes client from default kubeconfig as kubeadmin // 2. 
Will create a sandbox user and will generate a client using user token a new client to create resources in RHTAP like a normal user -func NewDevSandboxProxyClient(userName string, isSA bool, options utils.Options) (*K8SClient, error) { +func NewDevSandboxProxyClient(userName string, options utils.Options) (*K8SClient, error) { var err error var sandboxController *sandbox.SandboxController var proxyAuthInfo *sandbox.SandboxUserAuthInfo @@ -121,7 +121,7 @@ func NewDevSandboxProxyClient(userName string, isSA bool, options utils.Options) if err != nil { return nil, err } - proxyAuthInfo, err = sandboxController.ReconcileUserCreationStage(userName, options.ToolchainApiUrl, options.KeycloakUrl, options.OfflineToken, isSA) + proxyAuthInfo, err = sandboxController.ReconcileUserCreationStage(userName, options.ApiUrl, options.Token) if err != nil { return nil, err } diff --git a/pkg/clients/release/releases.go b/pkg/clients/release/releases.go index dd69769d7d..6434749510 100644 --- a/pkg/clients/release/releases.go +++ b/pkg/clients/release/releases.go @@ -166,10 +166,18 @@ func (r *ReleaseController) GetPipelineRunInNamespace(namespace, releaseName, re err := r.KubeRest().List(context.Background(), pipelineRuns, opts...) 
- if err == nil && len(pipelineRuns.Items) > 0 { + if err == nil && len(pipelineRuns.Items) > 1 { + return &pipelineRuns.Items[0], fmt.Errorf("found multiple PipelineRun in managed namespace '%s' for a release '%s' in '%s' namespace", namespace, releaseName, releaseNamespace) + } + + if err == nil && len(pipelineRuns.Items) == 1 { return &pipelineRuns.Items[0], nil } + if err == nil && len(pipelineRuns.Items) == 0 { + return nil, fmt.Errorf("couldn't find PipelineRun in managed namespace '%s' for a release '%s' in '%s' namespace", namespace, releaseName, releaseNamespace) + } + return nil, fmt.Errorf("couldn't find PipelineRun in managed namespace '%s' for a release '%s' in '%s' namespace because of err:'%w'", namespace, releaseName, releaseNamespace, err) } diff --git a/pkg/clients/tekton/pipelineruns.go b/pkg/clients/tekton/pipelineruns.go index 58897a8bae..f85489c7cf 100644 --- a/pkg/clients/tekton/pipelineruns.go +++ b/pkg/clients/tekton/pipelineruns.go @@ -198,8 +198,12 @@ func (t *TektonController) DeletePipelineRunIgnoreFinalizers(ns, name string) er } if err := t.KubeRest().Delete(context.Background(), &pipelineRunCR); err != nil { - g.GinkgoWriter.Printf("unable to delete PipelineRun '%s' in '%s': %v\n", pipelineRunCR.Name, pipelineRunCR.Namespace, err) - return false, nil + if strings.HasSuffix(err.Error(), " not found") { + return true, nil + } else { + g.GinkgoWriter.Printf("unable to delete PipelineRun '%s' in '%s': %v\n", pipelineRunCR.Name, pipelineRunCR.Namespace, err) + return false, nil + } } return true, nil }) diff --git a/pkg/framework/framework.go b/pkg/framework/framework.go index 58d55ea56f..64f38ac5be 100644 --- a/pkg/framework/framework.go +++ b/pkg/framework/framework.go @@ -50,20 +50,7 @@ func NewFramework(userName string, stageConfig ...utils.Options) (*Framework, er return NewFrameworkWithTimeout(userName, time.Second*60, stageConfig...) 
} -// This periodically refreshes framework for Stage user because of Keycloak access token expires in 15 minutes -func refreshFrameworkStage(currentFramework *Framework, userName string, timeout time.Duration, options ...utils.Options) { - for { - time.Sleep(time.Minute * 10) - fw, err := newFrameworkWithTimeout(userName, timeout, options...) - if err != nil { - fmt.Printf("ERROR: Failed refreshing framework for user %s: %+v\n", userName, err) - return - } - *currentFramework = *fw - } -} - -func newFrameworkWithTimeout(userName string, timeout time.Duration, options ...utils.Options) (*Framework, error) { +func NewFrameworkWithTimeout(userName string, timeout time.Duration, options ...utils.Options) (*Framework, error) { var err error var k *kubeCl.K8SClient var clusterAppDomain, openshiftConsoleHost string @@ -73,11 +60,11 @@ func newFrameworkWithTimeout(userName string, timeout time.Duration, options ... if userName == "" { return nil, fmt.Errorf("userName cannot be empty when initializing a new framework instance") } - isStage, isSA, err := utils.CheckOptions(options) + isStage, err := utils.CheckOptions(options) if err != nil { return nil, err } - if len(options) == 1 { + if isStage { option = options[0] } else { option = utils.Options{} @@ -89,7 +76,7 @@ func newFrameworkWithTimeout(userName string, timeout time.Duration, options ... // Just try several times to get the user kubeconfig err = retry.Do( func() error { - if k, err = kubeCl.NewDevSandboxProxyClient(userName, isSA, option); err != nil { + if k, err = kubeCl.NewDevSandboxProxyClient(userName, option); err != nil { GinkgoWriter.Printf("error when creating dev sandbox proxy client: %+v\n", err) } return err @@ -182,25 +169,6 @@ func newFrameworkWithTimeout(userName string, timeout time.Duration, options ... 
}, nil } -func NewFrameworkWithTimeout(userName string, timeout time.Duration, options ...utils.Options) (*Framework, error) { - isStage, isSA, err := utils.CheckOptions(options) - if err != nil { - return nil, err - } - - if isStage && !isSA { - options[0].ToolchainApiUrl = fmt.Sprintf("%s/workspaces/%s", options[0].ToolchainApiUrl, userName) - } - - fw, err := newFrameworkWithTimeout(userName, timeout, options...) - - if isStage && !isSA { - go refreshFrameworkStage(fw, userName, timeout, options...) - } - - return fw, err -} - func InitControllerHub(cc *kubeCl.CustomClient) (*ControllerHub, error) { // Initialize Common controller commonCtrl, err := common.NewSuiteController(cc) diff --git a/pkg/sandbox/sandbox.go b/pkg/sandbox/sandbox.go index 197ddc9d64..e92a5f65df 100644 --- a/pkg/sandbox/sandbox.go +++ b/pkg/sandbox/sandbox.go @@ -149,30 +149,20 @@ func (lrt LoggingRoundTripper) RoundTrip(req *http.Request) (res *http.Response, // Handle the result. if e != nil { - GinkgoWriter.Printf("Sandbox proxy error: %v", e) + GinkgoWriter.Printf("Sandbox proxy error: %v\n", e) } return res, e } // ReconcileUserCreation create a user in sandbox and return a valid kubeconfig for user to be used for the tests -func (s *SandboxController) ReconcileUserCreationStage(userName, toolchainApiUrl, keycloakUrl, offlineToken string, isSA bool) (*SandboxUserAuthInfo, error) { +func (s *SandboxController) ReconcileUserCreationStage(userName, apiUrl, token string) (*SandboxUserAuthInfo, error) { wd, err := os.Getwd() if err != nil { return nil, err } kubeconfigPath := utils.GetEnv(constants.USER_KUBE_CONFIG_PATH_ENV, fmt.Sprintf("%s/tmp/%s.kubeconfig", wd, userName)) - var userToken string - if isSA { - userToken = offlineToken - } else { - userToken, err = s.GetKeycloakTokenStage(userName, keycloakUrl, offlineToken) - if err != nil { - return nil, err - } - } - - return s.GetKubeconfigPathForSpecificUser(true, toolchainApiUrl, userName, kubeconfigPath, userToken) + return 
s.GetKubeconfigPathForSpecificUser(true, apiUrl, userName, kubeconfigPath, token) } // ReconcileUserCreation create a user in sandbox and return a valid kubeconfig for user to be used for the tests @@ -298,7 +288,7 @@ func (s *SandboxController) UpdateUserSignup(userSignupName string, modifyUserSi modifyUserSignup(freshUserSignup) if err := s.KubeRest.Update(context.Background(), freshUserSignup); err != nil { - GinkgoWriter.Printf("error updating UserSignup '%s': %s. Will retry again...", userSignupName, err.Error()) + GinkgoWriter.Printf("error updating UserSignup '%s': %s. Will retry again...\n", userSignupName, err.Error()) return false, nil } userSignup = freshUserSignup diff --git a/pkg/utils/util.go b/pkg/utils/util.go index 167484fe1b..afbd3121dc 100644 --- a/pkg/utils/util.go +++ b/pkg/utils/util.go @@ -41,40 +41,31 @@ import ( ) type Options struct { - ToolchainApiUrl string - KeycloakUrl string - OfflineToken string + ApiUrl string + Token string } // check options are valid or not -func CheckOptions(optionsArr []Options) (bool, bool, error) { +func CheckOptions(optionsArr []Options) (bool, error) { if len(optionsArr) == 0 { - return false, false, nil + return false, nil } if len(optionsArr) > 1 { - return true, false, fmt.Errorf("options array contains more than 1 object") + return true, fmt.Errorf("options array contains more than 1 object") } options := optionsArr[0] - if options.ToolchainApiUrl == "" { - return true, false, fmt.Errorf("ToolchainApiUrl field is empty") + if options.ApiUrl == "" { + return true, fmt.Errorf("ApiUrl field is empty") } - if options.KeycloakUrl == "" { - return true, false, fmt.Errorf("KeycloakUrl field is empty") + if options.Token == "" { + return true, fmt.Errorf("Token field is empty") } - if options.OfflineToken == "" { - return true, false, fmt.Errorf("OfflineToken field is empty") - } - - if options.KeycloakUrl == "DIRECT" { - return true, true, nil - } else { - return true, false, nil - } + return true, nil } // 
CheckIfEnvironmentExists return true/false if the environment variable exists diff --git a/tests/build/build.go b/tests/build/build.go index 341f958ea4..23953b5c69 100644 --- a/tests/build/build.go +++ b/tests/build/build.go @@ -143,7 +143,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, } - _, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPrivateRepo), buildPipelineAnnotation)) + _, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPrivateRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) @@ -269,10 +269,13 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser It("PR branch should not exist in the repo", func() { timeout = time.Second * 60 interval = time.Second * 1 - Eventually(func() bool { + Eventually(func() (bool, error) { exists, err := gitClient.BranchExists(helloWorldRepository, customDefaultComponentBranch) - Expect(err).ShouldNot(HaveOccurred()) - return exists + if err != nil { + Expect(err.Error()).To(Or(ContainSubstring("Reference does not exist"), ContainSubstring("404"))) + return false, nil + } + return exists, nil }, timeout, interval).Should(BeFalse(), fmt.Sprintf("timed out when waiting for the branch %s to be deleted from %s repository", customDefaultComponentBranch, helloWorldComponentGitSourceRepoName)) }) @@ -319,7 +322,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, } // Create a component with Git Source URL, a specified git branch and marking delete-repo=true - component, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, 
testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + component, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) @@ -666,7 +669,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, } - _, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + _, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) @@ -776,7 +779,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, }, } - component, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + component, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) @@ -900,7 +903,7 @@ var _ 
= framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, }, } - _, err = fw.AsKubeAdmin.HasController.CreateComponent(componentObj, namespace, "", "", appName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + _, err = fw.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, namespace, "", "", appName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) @@ -1046,7 +1049,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, }, } - _, err := f.AsKubeAdmin.HasController.CreateComponent(componentObj1, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + _, err := f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj1, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) It("creates second component", func() { @@ -1063,7 +1066,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, }, } - _, err := f.AsKubeAdmin.HasController.CreateComponent(componentObj2, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + _, err := f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj2, testNamespace, "", "", applicationName, false, 
utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) @@ -1179,7 +1182,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser }, } - component, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(invalidBuildAnnotation, buildPipelineAnnotation)) + component, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(invalidBuildAnnotation, buildPipelineAnnotation)) Expect(component).ToNot(BeNil()) Expect(err).ShouldNot(HaveOccurred()) }) @@ -1241,7 +1244,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser ComponentName: fmt.Sprintf("build-suite-test-component-image-source-%s", util.GenerateRandomString(6)), ContainerImage: containerImageSource, } - _, err = f.AsKubeAdmin.HasController.CreateComponent(component, testNamespace, outputContainerImage, "", applicationName, true, buildPipelineAnnotation) + _, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(component, testNamespace, outputContainerImage, "", applicationName, true, buildPipelineAnnotation) Expect(err).ShouldNot(HaveOccurred()) // get the build pipeline bundle annotation @@ -1405,12 +1408,13 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser println("deleting branch " + c.componentBranch) err = gitClient.DeleteBranch(repositories[i], c.componentBranch) if err != nil { - Expect(err.Error()).To(Or(ContainSubstring("Reference does not exist"), ContainSubstring("Branch Not Found"))) + Expect(err.Error()).To(Or(ContainSubstring("Reference does not exist"), ContainSubstring("404 Not Found"), ContainSubstring("Branch Not Found"))) } err = gitClient.DeleteBranch(repositories[i], c.pacBranchName) if 
err != nil { - Expect(err.Error()).To(Or(ContainSubstring("Reference does not exist"), ContainSubstring("Branch Not Found"))) + Expect(err.Error()).To(Or(ContainSubstring("Reference does not exist"), ContainSubstring("404 Not Found"), ContainSubstring("Branch Not Found"))) } + // Cleanup parent repo webhooks err = gitClient.CleanupWebhooks(componentDependenciesParentRepoName, f.ClusterAppDomain) if err != nil { @@ -1446,7 +1450,7 @@ var _ = framework.BuildSuiteDescribe("Build service E2E tests", Label("build-ser if comp.repoName == componentDependenciesParentRepoName { componentObj.BuildNudgesRef = []string{ChildComponentDef.componentName} } - comp.component, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + comp.component, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) } }) diff --git a/tests/build/build_templates.go b/tests/build/build_templates.go index 761e96556b..983d0b9bb6 100644 --- a/tests/build/build_templates.go +++ b/tests/build/build_templates.go @@ -150,7 +150,7 @@ func CreateComponent(commonCtrl *common.SuiteController, ctrl *has.HasController "build.appstudio.openshift.io/pipeline": fmt.Sprintf(`{"name":"%s", "bundle": "%s"}`, pipelineBundleName, customBuildBundle), } } - c, err := ctrl.CreateComponent(componentObj, namespace, "", "", applicationName, false, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation)) + c, err := ctrl.CreateComponentCheckImageRepository(componentObj, namespace, "", "", applicationName, false, 
utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) Expect(c.Name).Should(Equal(componentName)) diff --git a/tests/build/multi-platform.go b/tests/build/multi-platform.go index 842832c9bd..9486bd9fb6 100644 --- a/tests/build/multi-platform.go +++ b/tests/build/multi-platform.go @@ -493,7 +493,7 @@ func createApplicationAndComponent(f *framework.Framework, testNamespace, platfo }, }, } - component, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation)) + component, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) return } diff --git a/tests/integration-service/gitlab-integration-reporting.go b/tests/integration-service/gitlab-integration-reporting.go index aae6c2ba67..03d9aed783 100644 --- a/tests/integration-service/gitlab-integration-reporting.go +++ b/tests/integration-service/gitlab-integration-reporting.go @@ -114,7 +114,7 @@ var _ = framework.IntegrationServiceSuiteDescribe("Gitlab Status Reporting of In // get the build pipeline bundle annotation buildPipelineAnnotation := build.GetBuildPipelineBundleAnnotation(constants.DockerBuild) // Create a component with Git Source URL, a specified git branch - component, err = f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + component, err = f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, 
utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) diff --git a/tests/integration-service/integration.go b/tests/integration-service/integration.go index bb536c31e0..41cd5d1d16 100644 --- a/tests/integration-service/integration.go +++ b/tests/integration-service/integration.go @@ -452,7 +452,7 @@ func createComponent(f framework.Framework, testNamespace, applicationName, comp }, } - originalComponent, err := f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + originalComponent, err := f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, false, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).NotTo(HaveOccurred()) return originalComponent, componentName, pacBranchName, componentBaseBranchName @@ -480,7 +480,7 @@ func createComponentWithCustomBranch(f framework.Framework, testNamespace, appli }, } - originalComponent, err := f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + originalComponent, err := f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).NotTo(HaveOccurred()) return originalComponent diff 
--git a/tests/konflux-demo/konflux-demo.go b/tests/konflux-demo/konflux-demo.go index 8fcb154933..70cfd1fa7b 100644 --- a/tests/konflux-demo/konflux-demo.go +++ b/tests/konflux-demo/konflux-demo.go @@ -178,7 +178,7 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() { }, } - component, err = fw.AsKubeAdmin.HasController.CreateComponent(componentObj, userNamespace, "", "", appSpec.ApplicationName, false, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation)) + component, err = fw.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, userNamespace, "", "", appSpec.ApplicationName, false, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation)) Expect(err).ShouldNot(HaveOccurred()) }) diff --git a/tests/load-tests/.gitignore b/tests/load-tests/.gitignore index aff54803fd..841c3cd016 100644 --- a/tests/load-tests/.gitignore +++ b/tests/load-tests/.gitignore @@ -5,4 +5,13 @@ load-tests.json load-tests.*.json output.json -users.json +users*.json + +loadtest + +OLD/ +run-*/ +videos/ +collected-data/ +mystoneinst/ +secrets/ diff --git a/tests/load-tests/ci-scripts/collect-results.sh b/tests/load-tests/ci-scripts/collect-results.sh index b657166683..d3b9c8eddd 100755 --- a/tests/load-tests/ci-scripts/collect-results.sh +++ b/tests/load-tests/ci-scripts/collect-results.sh @@ -16,6 +16,8 @@ ARTIFACT_DIR=${ARTIFACT_DIR:-artifacts} mkdir -p ${ARTIFACT_DIR} pushd "${2:-./tests/load-tests}" +{ + echo "[$(date --utc -Ins)] Collecting artifacts" find . -maxdepth 1 -type f -name '*.log' -exec cp -vf {} "${ARTIFACT_DIR}" \; find . 
-maxdepth 1 -type f -name '*.csv' -exec cp -vf {} "${ARTIFACT_DIR}" \; @@ -35,24 +37,24 @@ python3 -m pip install matplotlib } &>"${ARTIFACT_DIR}/monitoring-setup.log" echo "[$(date --utc -Ins)] Create summary JSON with timings" -./evaluate.py "${ARTIFACT_DIR}/load-test-timings.csv" "${ARTIFACT_DIR}/load-test-timings.json" +./evaluate.py "${ARTIFACT_DIR}/load-test-options.json" "${ARTIFACT_DIR}/load-test-timings.csv" "${ARTIFACT_DIR}/load-test-timings.json" -echo "[$(date --utc -Ins)] Counting PRs and TRs" -ci-scripts/utility_scripts/count-multiarch-taskruns.py --data-dir "${ARTIFACT_DIR}" >"${ARTIFACT_DIR}/count-multiarch-taskruns.log" +echo "[$(date --utc -Ins)] Create summary JSON with errors" +./errors.py "${ARTIFACT_DIR}/load-test-errors.csv" "${ARTIFACT_DIR}/load-test-timings.json" "${ARTIFACT_DIR}/load-test-errors.json" echo "[$(date --utc -Ins)] Graphing PRs and TRs" -ci-scripts/utility_scripts/show-pipelineruns.py --data-dir "${ARTIFACT_DIR}" >"${ARTIFACT_DIR}/show-pipelineruns.log" +ci-scripts/utility_scripts/show-pipelineruns.py --data-dir "${ARTIFACT_DIR}" &>"${ARTIFACT_DIR}/show-pipelineruns.log" mv "${ARTIFACT_DIR}/output.svg" "${ARTIFACT_DIR}/show-pipelines.svg" echo "[$(date --utc -Ins)] Computing duration of PRs, TRs and steps" -ci-scripts/utility_scripts/get-taskruns-durations.py --data-dir "${ARTIFACT_DIR}" --dump-json "${ARTIFACT_DIR}/get-taskruns-durations.json" >"${ARTIFACT_DIR}/get-taskruns-durations.log" +ci-scripts/utility_scripts/get-taskruns-durations.py --debug --data-dir "${ARTIFACT_DIR}" --dump-json "${ARTIFACT_DIR}/get-taskruns-durations.json" &>"${ARTIFACT_DIR}/get-taskruns-durations.log" echo "[$(date --utc -Ins)] Creating main status data file" STATUS_DATA_FILE="${ARTIFACT_DIR}/load-test.json" status_data.py \ --status-data-file "${STATUS_DATA_FILE}" \ --set "name=Konflux loadtest" "started=$( cat started )" "ended=$( cat ended )" \ - --set-subtree-json "parameters.options=${ARTIFACT_DIR}/load-test-options.json" 
"results.measurements=${ARTIFACT_DIR}/load-test-timings.json" "results.durations=${ARTIFACT_DIR}/get-taskruns-durations.json" + --set-subtree-json "parameters.options=${ARTIFACT_DIR}/load-test-options.json" "results.measurements=${ARTIFACT_DIR}/load-test-timings.json" "results.errors=${ARTIFACT_DIR}/load-test-errors.json" "results.durations=${ARTIFACT_DIR}/get-taskruns-durations.json" echo "[$(date --utc -Ins)] Adding monitoring data" mstarted="$( date -d "$( cat started )" --utc -Iseconds )" @@ -148,4 +150,6 @@ fi #$tapa all "${pipelinerun_stub}.json" "${taskrun_stub}.json" "${pod_stub}.json" >"$tapa_tmp" #sort_csv "$tapa_tmp" "$tapa_all_csv" +} 2>&1 | tee "${ARTIFACT_DIR}/collect-results.log" + popd diff --git a/tests/load-tests/ci-scripts/config/horreum-labels.sh b/tests/load-tests/ci-scripts/config/horreum-labels.sh new file mode 100755 index 0000000000..99ca63a544 --- /dev/null +++ b/tests/load-tests/ci-scripts/config/horreum-labels.sh @@ -0,0 +1,248 @@ +#!/bin/bash + +set -eu -o pipefail + +# Here we are using 'shovel.py' utility from OPL: +# +# https://github.com/redhat-performance/opl/ +# +# Example of some commands are: +# +# shovel.py horreum --base-url https://horreum.corp.redhat.com/ --api-token "$HORREUM_API_TOKEN" schema-label-add --schema-uri "urn:rhtap-perf-team-load-test:1.0" --extractor-jsonpath "\$.xyz" --metrics --owner hybrid-cloud-experience-perfscale-team +# +# shovel.py horreum --base-url https://horreum.corp.redhat.com/ --api-token "$HORREUM_API_TOKEN" schema-label-list --schema-uri "urn:rhtap-perf-team-load-test:1.0" | grep xyz +# +# shovel.py horreum --base-url https://horreum.corp.redhat.com/ --api-token "$HORREUM_API_TOKEN" schema-label-add --schema-uri "urn:rhtap-perf-team-load-test:1.0" --extractor-jsonpath "\$.xyz" --metrics --owner hybrid-cloud-experience-perfscale-team --name something --update-by-id 999999 +# +# shovel.py horreum --base-url https://horreum.corp.redhat.com/ --api-token "$HORREUM_API_TOKEN" schema-label-delete 
--schema-uri "urn:rhtap-perf-team-load-test:1.0" --id 999999 +# +# But here we are using just one that updates (or adds if label with the name is missing) labels for given extractor JSON path expressions: +# +# I'm using this helper to add new labels for new test phases when they are processed by evaluate.py and stored into load-test-timings.json: +# +# jq -r '. | keys[]' load-test-timings.json | grep -v '^KPI$' | while read m; do echo "horreum_schema_label_present '\$.results.measurements.$m.error_rate'"; echo "horreum_schema_label_present '\$.results.measurements.$m.pass.duration.mean'"; done | LANG=C sort >/tmp/list.sh +# meld /tmp/list.sh ci-scripts/config/horreum-labels.sh + +function horreum_schema_label_present() { + local extractor="$1" + shovel.py \ + --verbose \ + horreum \ + --base-url https://horreum.corp.redhat.com/ \ + --api-token "$HORREUM_API_TOKEN" \ + schema-label-update \ + --schema-uri "urn:rhtap-perf-team-load-test:1.0" \ + --metrics \ + --owner hybrid-cloud-experience-perfscale-team \ + --update-by-name \ + --add-if-missing \ + --extractor-jsonpath "${extractor}" +} + +horreum_schema_label_present '$.metadata.env.ARTIFACT_DIR' +horreum_schema_label_present '$.metadata.env.BUILD_ID' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/amd64".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/amd64".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/amd64".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/amd64".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/amd64".passed.scheduled.mean' +horreum_schema_label_present 
'$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/arm64".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/arm64".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/arm64".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/arm64".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/arm64".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/ppc64le".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/ppc64le".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/ppc64le".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/ppc64le".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/ppc64le".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/s390x".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/s390x".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/s390x".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/s390x".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/calculate-deps-linux/s390x".passed.scheduled.mean' +horreum_schema_label_present 
'$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/amd64".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/amd64".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/amd64".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/amd64".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/amd64".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/arm64".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/arm64".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/arm64".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/arm64".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/arm64".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/ppc64le".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/ppc64le".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/ppc64le".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/ppc64le".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/ppc64le".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/s390x".passed.duration.mean' 
+horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/s390x".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/s390x".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/s390x".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.platformtaskruns."build/rpmbuild-linux/s390x".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/apply-tags".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/apply-tags".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/apply-tags".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/apply-tags".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/apply-tags".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/buildah".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/buildah".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/buildah".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/buildah".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/buildah".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/build-image-index".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/build-image-index".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/build-image-index".passed.idle.mean' +horreum_schema_label_present 
'$.results.durations.stats.taskruns."build/build-image-index".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/build-image-index".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/calculate-deps".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/calculate-deps".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/calculate-deps".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/calculate-deps".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/calculate-deps".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/check-noarch".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/check-noarch".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/check-noarch".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/check-noarch".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/check-noarch".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clair-scan".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clair-scan".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clair-scan".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clair-scan".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clair-scan".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clamav-scan".passed.duration.mean' +horreum_schema_label_present 
'$.results.durations.stats.taskruns."build/clamav-scan".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clamav-scan".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clamav-scan".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/clamav-scan".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/coverity-availability-check".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/coverity-availability-check".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/coverity-availability-check".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/coverity-availability-check".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/coverity-availability-check".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/deprecated-image-check".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/deprecated-image-check".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/deprecated-image-check".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/deprecated-image-check".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/deprecated-image-check".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/ecosystem-cert-preflight-checks".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/ecosystem-cert-preflight-checks".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/ecosystem-cert-preflight-checks".passed.idle.mean' 
+horreum_schema_label_present '$.results.durations.stats.taskruns."build/ecosystem-cert-preflight-checks".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/ecosystem-cert-preflight-checks".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/get-rpm-sources".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/get-rpm-sources".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/get-rpm-sources".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/get-rpm-sources".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/get-rpm-sources".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone-oci-ta".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone-oci-ta".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone-oci-ta".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone-oci-ta".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone-oci-ta".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/git-clone".passed.scheduled.mean' +horreum_schema_label_present 
'$.results.durations.stats.taskruns."build/import-to-quay".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/import-to-quay".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/import-to-quay".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/import-to-quay".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/import-to-quay".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/init".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/init".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/init".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/init".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/init".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/push-dockerfile".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/push-dockerfile".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/push-dockerfile".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/push-dockerfile".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/push-dockerfile".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpmbuild".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpmbuild".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpmbuild".passed.idle.mean' +horreum_schema_label_present 
'$.results.durations.stats.taskruns."build/rpmbuild".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpmbuild".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpms-signature-scan".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpms-signature-scan".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpms-signature-scan".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpms-signature-scan".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/rpms-signature-scan".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-shell-check".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-shell-check".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-shell-check".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-shell-check".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-shell-check".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-snyk-check".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-snyk-check".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-snyk-check".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-snyk-check".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-snyk-check".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-unicode-check".passed.duration.mean' 
+horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-unicode-check".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-unicode-check".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-unicode-check".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/sast-unicode-check".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/show-sbom".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/show-sbom".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/show-sbom".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/show-sbom".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/show-sbom".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/summary".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/summary".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/summary".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/summary".passed.running.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."build/summary".passed.scheduled.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."test/test-output".passed.duration.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."test/test-output".passed.duration.samples' +horreum_schema_label_present '$.results.durations.stats.taskruns."test/test-output".passed.idle.mean' +horreum_schema_label_present '$.results.durations.stats.taskruns."test/test-output".passed.running.mean' +horreum_schema_label_present 
'$.results.durations.stats.taskruns."test/test-output".passed.scheduled.mean' +horreum_schema_label_present '$.results.errors.error_reasons_simple' +horreum_schema_label_present '$.results.measurements.HandleUser.error_rate' +horreum_schema_label_present '$.results.measurements.HandleUser.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.KPI.errors' +horreum_schema_label_present '$.results.measurements.KPI.mean' +horreum_schema_label_present '$.results.measurements.createApplication.error_rate' +horreum_schema_label_present '$.results.measurements.createApplication.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.createComponent.error_rate' +horreum_schema_label_present '$.results.measurements.createComponent.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.createIntegrationTestScenario.error_rate' +horreum_schema_label_present '$.results.measurements.createIntegrationTestScenario.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.createReleasePlan.error_rate' +horreum_schema_label_present '$.results.measurements.createReleasePlan.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.createReleasePlanAdmission.error_rate' +horreum_schema_label_present '$.results.measurements.createReleasePlanAdmission.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.getPaCPullNumber.error_rate' +horreum_schema_label_present '$.results.measurements.getPaCPullNumber.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateApplication.error_rate' +horreum_schema_label_present '$.results.measurements.validateApplication.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateComponent.error_rate' +horreum_schema_label_present '$.results.measurements.validateComponent.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validatePipelineRunCondition.error_rate' 
+horreum_schema_label_present '$.results.measurements.validatePipelineRunCondition.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validatePipelineRunCreation.error_rate' +horreum_schema_label_present '$.results.measurements.validatePipelineRunCreation.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validatePipelineRunSignature.error_rate' +horreum_schema_label_present '$.results.measurements.validatePipelineRunSignature.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateReleaseCondition.error_rate' +horreum_schema_label_present '$.results.measurements.validateReleaseCondition.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateReleaseCreation.error_rate' +horreum_schema_label_present '$.results.measurements.validateReleaseCreation.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateReleasePipelineRunCondition.error_rate' +horreum_schema_label_present '$.results.measurements.validateReleasePipelineRunCondition.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateReleasePipelineRunCreation.error_rate' +horreum_schema_label_present '$.results.measurements.validateReleasePipelineRunCreation.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateReleasePlan.error_rate' +horreum_schema_label_present '$.results.measurements.validateReleasePlan.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateReleasePlanAdmission.error_rate' +horreum_schema_label_present '$.results.measurements.validateReleasePlanAdmission.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateSnapshotCreation.error_rate' +horreum_schema_label_present '$.results.measurements.validateSnapshotCreation.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateTestPipelineRunCondition.error_rate' +horreum_schema_label_present 
'$.results.measurements.validateTestPipelineRunCondition.pass.duration.mean' +horreum_schema_label_present '$.results.measurements.validateTestPipelineRunCreation.error_rate' +horreum_schema_label_present '$.results.measurements.validateTestPipelineRunCreation.pass.duration.mean' diff --git a/tests/load-tests/ci-scripts/max-concurrency/cluster_read_config.yaml b/tests/load-tests/ci-scripts/max-concurrency/cluster_read_config.yaml index 115592d7cf..15c2d86abd 100644 --- a/tests/load-tests/ci-scripts/max-concurrency/cluster_read_config.yaml +++ b/tests/load-tests/ci-scripts/max-concurrency/cluster_read_config.yaml @@ -108,6 +108,7 @@ # Interesting CI environment variables {% for var in [ + 'ARTIFACT_DIR', 'BUILD_ID', 'HOSTNAME', 'JOB_NAME', diff --git a/tests/load-tests/ci-scripts/stage/cluster_read_config.yaml b/tests/load-tests/ci-scripts/stage/cluster_read_config.yaml index 70973834cb..588b86dad4 100644 --- a/tests/load-tests/ci-scripts/stage/cluster_read_config.yaml +++ b/tests/load-tests/ci-scripts/stage/cluster_read_config.yaml @@ -96,6 +96,7 @@ # Interesting CI environment variables {% for var in [ + 'ARTIFACT_DIR', 'BUILD_ID', 'BUILD_TAG', 'BUILD_URL', diff --git a/tests/load-tests/ci-scripts/stage/collect-results.sh b/tests/load-tests/ci-scripts/stage/collect-results.sh index c961b88509..cba3522b39 100755 --- a/tests/load-tests/ci-scripts/stage/collect-results.sh +++ b/tests/load-tests/ci-scripts/stage/collect-results.sh @@ -18,6 +18,8 @@ BASE_URL=$(echo $MEMBER_CLUSTER | grep -oP 'https://api\.\K[^:]+') PROMETHEUS_HOST="thanos-querier-openshift-monitoring.apps.$BASE_URL" TOKEN=${OCP_PROMETHEUS_TOKEN} +{ + echo "[$(date --utc -Ins)] Collecting artifacts" find . -maxdepth 1 -type f -name '*.log' -exec cp -vf {} "${ARTIFACT_DIR}" \; find . 
-maxdepth 1 -type f -name '*.csv' -exec cp -vf {} "${ARTIFACT_DIR}" \; @@ -34,27 +36,29 @@ python3 -m pip install -U pip python3 -m pip install -e "git+https://github.com/redhat-performance/opl.git#egg=opl-rhcloud-perf-team-core&subdirectory=core" python3 -m pip install tabulate python3 -m pip install matplotlib +echo "Content of the venv:" +python3 -m pip freeze } &>"${ARTIFACT_DIR}/monitoring-setup.log" echo "[$(date --utc -Ins)] Create summary JSON with timings" -./evaluate.py "${ARTIFACT_DIR}/load-test-timings.csv" "${ARTIFACT_DIR}/load-test-timings.json" +./evaluate.py "${ARTIFACT_DIR}/load-test-options.json" "${ARTIFACT_DIR}/load-test-timings.csv" "${ARTIFACT_DIR}/load-test-timings.json" -echo "[$(date --utc -Ins)] Counting PRs and TRs" -ci-scripts/utility_scripts/count-multiarch-taskruns.py --data-dir "${ARTIFACT_DIR}" >"${ARTIFACT_DIR}/count-multiarch-taskruns.log" +echo "[$(date --utc -Ins)] Create summary JSON with errors" +./errors.py "${ARTIFACT_DIR}/load-test-errors.csv" "${ARTIFACT_DIR}/load-test-timings.json" "${ARTIFACT_DIR}/load-test-errors.json" "${ARTIFACT_DIR}/collected-data/" || true echo "[$(date --utc -Ins)] Graphing PRs and TRs" -ci-scripts/utility_scripts/show-pipelineruns.py --data-dir "${ARTIFACT_DIR}" >"${ARTIFACT_DIR}/show-pipelineruns.log" || true +ci-scripts/utility_scripts/show-pipelineruns.py --data-dir "${ARTIFACT_DIR}" &>"${ARTIFACT_DIR}/show-pipelineruns.log" || true mv "${ARTIFACT_DIR}/output.svg" "${ARTIFACT_DIR}/show-pipelines.svg" || true echo "[$(date --utc -Ins)] Computing duration of PRs, TRs and steps" -ci-scripts/utility_scripts/get-taskruns-durations.py --data-dir "${ARTIFACT_DIR}" --dump-json "${ARTIFACT_DIR}/get-taskruns-durations.json" >"${ARTIFACT_DIR}/get-taskruns-durations.log" +ci-scripts/utility_scripts/get-taskruns-durations.py --debug --data-dir "${ARTIFACT_DIR}" --dump-json "${ARTIFACT_DIR}/get-taskruns-durations.json" &>"${ARTIFACT_DIR}/get-taskruns-durations.log" echo "[$(date --utc -Ins)] Creating main 
status data file" STATUS_DATA_FILE="${ARTIFACT_DIR}/load-test.json" status_data.py \ --status-data-file "${STATUS_DATA_FILE}" \ --set "name=Konflux loadtest" "started=$( cat started )" "ended=$( cat ended )" \ - --set-subtree-json "parameters.options=${ARTIFACT_DIR}/load-test-options.json" "results.measurements=${ARTIFACT_DIR}/load-test-timings.json" "results.durations=${ARTIFACT_DIR}/get-taskruns-durations.json" + --set-subtree-json "parameters.options=${ARTIFACT_DIR}/load-test-options.json" "results.measurements=${ARTIFACT_DIR}/load-test-timings.json" "results.errors=${ARTIFACT_DIR}/load-test-errors.json" "results.durations=${ARTIFACT_DIR}/get-taskruns-durations.json" echo "[$(date --utc -Ins)] Adding monitoring data" mstarted="$( date -d "$( cat started )" --utc -Iseconds )" @@ -75,4 +79,6 @@ status_data.py \ deactivate +} 2>&1 | tee "${ARTIFACT_DIR}/collect-results.log" + popd diff --git a/tests/load-tests/ci-scripts/utility_scripts/count-multiarch-taskruns.py b/tests/load-tests/ci-scripts/utility_scripts/count-multiarch-taskruns.py deleted file mode 100755 index 0b51c32325..0000000000 --- a/tests/load-tests/ci-scripts/utility_scripts/count-multiarch-taskruns.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python - -import argparse -import collections -import csv -import datetime -import json -import logging -import os -import os.path -import sys -import yaml -import time -import operator -import statistics -import re - -import tabulate - - -def str2date(date_str): - if isinstance(date_str, datetime.datetime): - return date_str - else: - try: - return datetime.datetime.fromisoformat(date_str) - except ValueError: # Python before 3.11 - # Convert "...Z" to "...+00:00" - date_str = date_str.replace("Z", "+00:00") - # Remove microseconds part - date_str = re.sub(r"(.*)(\.\d+)(\+.*)", r"\1\3", date_str) - # Convert simplified date - return datetime.datetime.fromisoformat(date_str) - -class DateTimeDecoder(json.JSONDecoder): - def __init__(self, *args, **kwargs): 
- super().__init__(object_hook=self.object_hook, *args, **kwargs) - - def object_hook(self, o): - ret = {} - for key, value in o.items(): - if isinstance(value, str): - try: - ret[key] = str2date(value) - except ValueError: - ret[key] = value - else: - ret[key] = value - return ret - -class Something: - def __init__(self, data_dir): - self.data_taskruns = [] - self.data_dir = data_dir - - self.tr_skips = 0 # how many TaskRuns we skipped - - self._populate(self.data_dir) - - def _load_json(self, path): - with open(path, "r") as fp: - return json.load(fp, cls=DateTimeDecoder) - - def _populate(self, data_dir): - for currentpath, folders, files in os.walk(data_dir): - for datafile in files: - datafile = os.path.join(currentpath, datafile) - - start = time.time() - if datafile.endswith(".yaml") or datafile.endswith(".yml"): - with open(datafile, "r") as fd: - data = yaml.safe_load(fd) - elif datafile.endswith(".json"): - try: - data = self._load_json(datafile) - except json.decoder.JSONDecodeError: - logging.warning(f"File {datafile} is malfrmed, skipping it") - continue - else: - continue - end = time.time() - logging.debug(f"Loaded {datafile} in {(end - start):.2f} seconds") - - if "kind" not in data: - logging.info(f"Skipping {datafile} as it does not contain kind") - continue - - if data["kind"] == "List": - if "items" not in data: - logging.info(f"Skipping {datafile} as it does not contain items") - continue - - for i in data["items"]: - self._populate_add_one(i) - else: - self._populate_add_one(data) - - print(f"We loaded {len(self.data_taskruns)} and skipped {self.tr_skips} TaskRuns") - - def _populate_add_one(self, something): - if "kind" not in something: - logging.info("Skipping item because it does not have kind") - return - - if something["kind"] == "TaskRun": - self._populate_taskrun(something) - else: - logging.debug(f"Skipping item because it has unexpected kind {something['kind']}") - return - - def _populate_taskrun(self, tr): - """Load TaskRun.""" - 
try: - tr_name = tr["metadata"]["name"] - except KeyError as e: - logging.info(f"TaskRun missing name, skipping: {e}, {str(tr)[:200]}") - self.tr_skips += 1 - return - - try: - tr_task = tr["metadata"]["labels"]["tekton.dev/pipelineTask"] - except KeyError as e: - logging.info( - f"TaskRun {tr_name} missing task, skipping: {e}" - ) - self.tr_skips += 1 - return - - try: - tr_conditions = tr["status"]["conditions"] - except KeyError as e: - logging.info(f"TaskRun {tr_name} missing conditions, skipping: {e}") - self.tr_skips += 1 - return - - tr_condition_ok = False - for c in tr_conditions: - if c["type"] == "Succeeded": - if c["status"] == "True": - tr_condition_ok = True - break - ###if not tr_condition_ok: - ### logging.info(f"TaskRun {tr_name} in wrong condition, skipping: {c}") - ### self.tr_skips += 1 - ### return - - try: - tr_creationTimestamp = str2date(tr["metadata"]["creationTimestamp"]) - tr_completionTime = str2date(tr["status"]["completionTime"]) - tr_startTime = str2date(tr["status"]["startTime"]) - tr_namespace = tr["metadata"]["namespace"] - except KeyError as e: - logging.info(f"TaskRun {tr_name} missing some fields, skipping: {e}") - self.tr_skips += 1 - return - - self.data_taskruns.append( - { - "namespace": tr_namespace, - "name": tr_name, - "task": tr_task, - "condition": tr_condition_ok, - "pending_duration": (tr_startTime - tr_creationTimestamp).total_seconds(), - "running_duration": (tr_completionTime - tr_startTime).total_seconds(), - "duration": (tr_completionTime - tr_creationTimestamp).total_seconds(), - } - ) - - def _show_multi_arch_tasks(self): - # All data - table_header = [ - "namespace", - "name", - "task", - "duration", - "condition", - ] - table = [] - for tr in self.data_taskruns: - table.append([ - tr["namespace"], - tr["name"], - tr["task"], - tr["duration"], - tr["condition"], - ]) - table.sort(key=operator.itemgetter(3)) - print("\nTaskRuns breakdown:\n") - print(tabulate.tabulate(table, headers=table_header)) - 
self._dump_as_csv("taskruns-breakdown-all.csv", table, table_header) - - # Per task average - data = {} - for tr in self.data_taskruns: - if not tr["condition"]: - continue # skip failed tasks - if tr["task"] not in data: - data[tr["task"]] = { - "count": 0, - "times": [], - } - data[tr["task"]]["count"] += 1 - data[tr["task"]]["times"].append(tr["duration"]) - table_header = [ - "task", - "duration_avg_sec", - "duration_stdev", - "duration_samples", - ] - table = [] - for t, v in data.items(): - table.append([ - t, - sum(v["times"]) / v["count"] if v["count"] > 0 else None, - statistics.stdev(v["times"]) if len(v["times"]) >= 2 else None, - v["count"], - ]) - table.sort(key=operator.itemgetter(1)) - print("\nTaskRuns breakdown averages by task (only successfull):\n") - print(tabulate.tabulate(table, headers=table_header, floatfmt=".0f")) - self._dump_as_csv("taskruns-breakdown-averages.csv", table, table_header) - - def _dump_as_csv(self, name, table, table_header): - name_full = os.path.join(self.data_dir, name) - with open(name_full, "w") as fd: - writer = csv.writer(fd) - writer.writerow(table_header) - for row in table: - writer.writerow(row) - - def doit(self): - self._show_multi_arch_tasks() - -def doit(args): - something = Something( - data_dir=args.data_dir, - ) - return something.doit() - - -def main(): - parser = argparse.ArgumentParser( - description="Show PipelineRuns and TaskRuns", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "--data-dir", - required=True, - help="Directory from where to load YAML data and where to put output SVG", - ) - parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Show verbose output", - ) - parser.add_argument( - "-d", - "--debug", - action="store_true", - help="Show debug output", - ) - args = parser.parse_args() - - fmt = "%(asctime)s %(name)s %(levelname)s %(message)s" - if args.verbose: - logging.basicConfig(format=fmt, level=logging.INFO) - elif args.debug: 
- logging.basicConfig(format=fmt, level=logging.DEBUG) - else: - logging.basicConfig(format=fmt) - - logging.debug(f"Args: {args}") - - return doit(args) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tests/load-tests/ci-scripts/utility_scripts/get-taskruns-durations.py b/tests/load-tests/ci-scripts/utility_scripts/get-taskruns-durations.py index b7a7eeb40c..3b03ef7265 100755 --- a/tests/load-tests/ci-scripts/utility_scripts/get-taskruns-durations.py +++ b/tests/load-tests/ci-scripts/utility_scripts/get-taskruns-durations.py @@ -1,7 +1,6 @@ #!/usr/bin/env python import argparse -import collections import csv import datetime import json @@ -11,12 +10,9 @@ import sys import yaml import time -import operator import statistics import re -import tabulate - def str2date(date_str): if isinstance(date_str, datetime.datetime): @@ -32,6 +28,7 @@ def str2date(date_str): # Convert simplified date return datetime.datetime.fromisoformat(date_str) + class DateTimeDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): super().__init__(object_hook=self.object_hook, *args, **kwargs) @@ -48,6 +45,7 @@ def object_hook(self, o): ret[key] = value return ret + class Something: def __init__(self, data_dir, dump_json): self.data_pipelineruns = [] @@ -153,6 +151,12 @@ def _populate_taskrun(self, tr): assert len(_tr_succeeded) == 1, f"TaskRun should have exactly one 'Succeeded' condition: {_tr_succeeded}" tr_result = _tr_succeeded[0]["status"] == "True" + tr_platform = None + if "params" in tr["spec"]: + for p in tr["spec"]["params"]: + if p["name"] == "PLATFORM": + tr_platform = p["value"] + tr_steps = {} for s in tr["status"]["steps"]: try: @@ -172,7 +176,7 @@ def _populate_taskrun(self, tr): self.step_skips += 1 except KeyError as e: - logging.info(f"TaskRun incomplete, skipping: {e}, {str(tr)[:200]}") + logging.warning(f"TaskRun incomplete, skipping: {e}, {str(tr)[:200]}") self.tr_skips += 1 return @@ -184,6 +188,7 @@ def _populate_taskrun(self, tr): "creation": 
tr_creation_time, "start": tr_start_time, "completion": tr_completion_time, + "platform": tr_platform, "steps": tr_steps, }) @@ -252,7 +257,6 @@ def _merge_time_interval(self, new, existing): logging.info(f"Interval {self._format_interval(new)} does not collide with any member, adding it") return existing + [new] - def doit(self): # Normalize data into the structure we will use and do some cross checks data = {} @@ -288,6 +292,7 @@ def doit(self): "creation": tr["creation"], "start": tr["start"], "completion": tr["completion"], + "platform": tr["platform"], "steps": tr["steps"], } @@ -305,6 +310,8 @@ def doit(self): }, "taskruns": { }, + "platformtaskruns": { + }, "steps": { }, } @@ -346,6 +353,7 @@ def doit(self): for tr_name, tr_data in pr_data["taskruns"].items(): tr_id = f"{pr_id}/{tr_data['task']}" + ptr_id = f"{pr_id}/{tr_data['task']}-{tr_data['platform']}" logging.debug(f"Working on TaskRun {tr_id}") if tr_id not in result["taskruns"]: @@ -380,6 +388,27 @@ def doit(self): result["taskruns"][tr_id][tr_result]["scheduled"].append(tr_scheduled) result["taskruns"][tr_id][tr_result]["idle"].append(tr_idle) + if tr_data['platform'] is not None: + if ptr_id not in result["platformtaskruns"]: + result["platformtaskruns"][ptr_id] = { + "passed": { + "duration": [], + "running": [], + "scheduled": [], + "idle": [], + }, + "failed": { + "duration": [], + "running": [], + "scheduled": [], + "idle": [], + }, + } + result["platformtaskruns"][ptr_id][tr_result]["duration"].append(tr_duration) + result["platformtaskruns"][ptr_id][tr_result]["running"].append(tr_running) + result["platformtaskruns"][ptr_id][tr_result]["scheduled"].append(tr_scheduled) + result["platformtaskruns"][ptr_id][tr_result]["idle"].append(tr_idle) + for s_name, s_data in tr_data["steps"].items(): s_id = f"{tr_id}/{s_name}" logging.debug(f"Working on Step {s_id}") @@ -400,7 +429,7 @@ def doit(self): result["steps"][s_id][s_result]["duration"].append(s_duration) # Compute statistical data - for e in 
("pipelineruns", "taskruns", "steps"): + for e in ("pipelineruns", "taskruns", "platformtaskruns", "steps"): for my_id, my_data1 in result[e].items(): for my_result, my_data2 in my_data1.items(): for my_stat, my_data3 in my_data2.items(): @@ -438,6 +467,7 @@ def doit(self): print("TaskRuns skipped:", self.tr_skips) print("Steps skipped:", self.step_skips) + def doit(args): something = Something( data_dir=args.data_dir, diff --git a/tests/load-tests/ci-scripts/utility_scripts/playwright-update-tokens.py b/tests/load-tests/ci-scripts/utility_scripts/playwright-update-tokens.py index afd146414c..bde60aba8d 100755 --- a/tests/load-tests/ci-scripts/utility_scripts/playwright-update-tokens.py +++ b/tests/load-tests/ci-scripts/utility_scripts/playwright-update-tokens.py @@ -2,7 +2,7 @@ # Docs: # This script uses credentials (username and password) from users.json -# to login to console.dev.redhat.com and generate new offline token. It +# to login to console.redhat.com and generate new offline token. It # saves updated content to users-new.json. 
# # Setup: @@ -29,63 +29,72 @@ import queue import os.path import sys +import traceback sys.path.append(os.path.dirname(os.path.realpath(__file__))) import playwright_lib -PLAYWRIGHT_HEADLESS = False +PLAYWRIGHT_HEADLESS = True PLAYWRIGHT_VIDEO_DIR = "videos/" def workload(user): - username = user["username"].replace("-", "_") - password = user["password"] - - with playwright.sync_api.sync_playwright() as p: - browser = p.chromium.launch( - headless=PLAYWRIGHT_HEADLESS, - ) - context = browser.new_context( - record_video_dir=PLAYWRIGHT_VIDEO_DIR, - ) - page = context.new_page() - - playwright_lib.goto_login_and_accept_cookies(page) - - playwright_lib.form_login(page, username, password) - - # Go to OpenShift Token page - page.goto("https://console.dev.redhat.com/openshift/token") - page.wait_for_url("https://console.dev.redhat.com/openshift/token**") - page.wait_for_selector('//h2[text()="Connect with offline tokens"]') - - # Wait for token - button_token = page.locator('//button[text()="Load token"]') - if button_token.is_visible(): - button_token.click() - attempt = 1 - attempt_max = 100 - while True: - input_token = page.locator( - '//input[@aria-label="Copyable token" and not(contains(@value, "ocm login "))]' - ) - input_token_value = input_token.get_attribute("value") - # Token value is populated assynchronously, so call it ready once - # it is longer than string "" or "null" - if len(input_token_value) > 10: - break - if attempt > attempt_max: - input_token_value = "Failed" - break - attempt += 1 - time.sleep(1) - print(f"Token for user {username}: {input_token_value}") + try: + username = user["username"].replace("-", "_") + password = user["password"] - page.close() - browser.close() + with playwright.sync_api.sync_playwright() as p: + browser = p.chromium.launch( + headless=PLAYWRIGHT_HEADLESS, + ) + context = browser.new_context( + record_video_dir=PLAYWRIGHT_VIDEO_DIR, + ) + page = context.new_page() + + playwright_lib.goto_login_and_accept_cookies(page) 
+ + playwright_lib.form_login(page, username, password) + + # Go to OpenShift Token page + page.goto("https://console.redhat.com/openshift/token") + page.wait_for_url("https://console.redhat.com/openshift/token**") + + # Confirm I want to load a token + page.locator('a:has-text("use API tokens to authenticate")').click() + + # Wait for token + button_token = page.locator('//button[text()="Load token"]') + if button_token.is_visible(): + button_token.click() + attempt = 1 + attempt_max = 100 + while True: + input_token = page.locator( + '//input[@aria-label="Copyable token" and not(contains(@value, "ocm login "))]' + ) + input_token_value = input_token.get_attribute("value") + # Token value is populated assynchronously, so call it ready once + # it is longer than string "" or "null" + if len(input_token_value) > 10: + break + if attempt > attempt_max: + input_token_value = "Failed" + break + attempt += 1 + time.sleep(1) + print(f"Token for user {username}: {input_token_value}") + + page.close() + browser.close() + + user["token"] = input_token_value + return user - user["token"] = input_token_value - return user + except Exception as e: + print(f"[ERROR] Failed while processing {user['username']}") + traceback.print_exc() + raise def process_it(output_queue, user): @@ -104,8 +113,10 @@ def main(): users_allowlist = [] # keep empty to allow all for user in users: - if users_allowlist is not [] and user["username"] not in users_allowlist: + if users_allowlist != [] and user["username"] not in users_allowlist: + print(f"Skipping user {user['username']} as it is not in allow list") continue + result_queue = multiprocessing.Queue() process = multiprocessing.Process(target=process_it, args=(result_queue, user)) process.start() diff --git a/tests/load-tests/ci-scripts/utility_scripts/playwright_lib.py b/tests/load-tests/ci-scripts/utility_scripts/playwright_lib.py index 354cfee9d8..ab8630fc8b 100644 --- a/tests/load-tests/ci-scripts/utility_scripts/playwright_lib.py +++ 
b/tests/load-tests/ci-scripts/utility_scripts/playwright_lib.py @@ -6,7 +6,7 @@ def goto_login_and_accept_cookies(page): """Open a login page and accept cookies dialog""" - page.goto("https://console.dev.redhat.com") + page.goto("https://console.redhat.com") page.wait_for_url("https://sso.redhat.com/**") # Accept cookies @@ -14,7 +14,10 @@ def goto_login_and_accept_cookies(page): cookies_button = cookies_iframe.get_by_role( "button", name="Agree and proceed with standard settings" ) - cookies_button.click() + if cookies_button.is_visible(): + cookies_button.click() + else: + print("Cookies button not found or already clicked.") def form_login(page, username, password): @@ -30,5 +33,6 @@ def form_login(page, username, password): input_pass.wait_for(state="visible") input_pass.fill(password) page.locator('//button[@id="rh-password-verification-submit-button"]').click() - page.wait_for_url("https://console.dev.redhat.com/**") + page.wait_for_url("https://console.redhat.com/**") page.wait_for_selector('//h2[text()="Welcome to your Hybrid Cloud Console."]') + diff --git a/tests/load-tests/cluster_read_config.yaml b/tests/load-tests/cluster_read_config.yaml index caa92898c6..0d300dfeb8 100644 --- a/tests/load-tests/cluster_read_config.yaml +++ b/tests/load-tests/cluster_read_config.yaml @@ -108,6 +108,7 @@ # Interesting CI environment variables {% for var in [ + 'ARTIFACT_DIR', 'BUILD_ID', 'HOSTNAME', 'JOB_NAME', diff --git a/tests/load-tests/errors.py b/tests/load-tests/errors.py new file mode 100755 index 0000000000..23029c3594 --- /dev/null +++ b/tests/load-tests/errors.py @@ -0,0 +1,556 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +import collections +import csv +import json +import logging +import os +import re +import sys +import yaml + + +# Column indexes in input data +COLUMN_WHEN = 0 +COLUMN_CODE = 1 +COLUMN_MESSAGE = 2 + +# Errors patterns we recognize (when newlines were removed) +ERRORS = { + ("Application creation failed because it already exists", 
r"Application failed creation: Unable to create the Application .*: applications.appstudio.redhat.com .* already exists"), + ("Application creation failed because of TLS handshake timeout", r"Application failed creation: Unable to create the Application .*: failed to get API group resources: unable to retrieve the complete list of server APIs: appstudio.redhat.com/v1alpha1: Get .*: net/http: TLS handshake timeout"), + ("Application creation failed because resourcequota object has been modified", r"Application failed creation: Unable to create the Application [^ ]+: Operation cannot be fulfilled on resourcequotas [^ ]+: the object has been modified; please apply your changes to the latest version and try again"), + ("Application creation timed out waiting for quota evaluation", r"Application failed creation: Unable to create the Application .*: Internal error occurred: resource quota evaluation timed out"), + ("Build Pipeline Run was cancelled", r"Build Pipeline Run failed run: PipelineRun for component .* in namespace .* failed: .* Reason:Cancelled.*Message:PipelineRun .* was cancelled"), + ("Component creation failed because resourcequota object has been modified", r"Component failed creation: Unable to create the Component [^ ]+: Operation cannot be fulfilled on resourcequotas [^ ]+: the object has been modified; please apply your changes to the latest version and try again"), + ("Component creation timed out waiting for image-controller annotations", r"Component failed creation: Unable to create the Component .* timed out when waiting for image-controller annotations to be updated on component"), # obsolete + ("Component creation timed out waiting for image repository to be ready", r"Component failed creation: Unable to create the Component .* timed out waiting for image repository to be ready for component .* in namespace .*: context deadline exceeded"), + ("Couldnt get pipeline via bundles resolver from quay.io due to 429", r"Message:Error retrieving pipeline 
for pipelinerun .*bundleresolver.* cannot retrieve the oci image: GET https://quay.io/v2/.*unexpected status code 429 Too Many Requests"), + ("Couldnt get pipeline via git resolver from gitlab.cee due to 429", r"Message:.*resolver failed to get Pipeline.*error requesting remote resource.*Git.*https://gitlab.cee.redhat.com/.* status code: 429"), + ("Couldnt get pipeline via http resolver from gitlab.cee", r"Message:.*resolver failed to get Pipeline.*error requesting remote resource.*Http.*https://gitlab.cee.redhat.com/.* is not found"), + ("Couldnt get task via buldles resolver from quay.io due to 404", r"Message:.*Couldn't retrieve Task .*resolver type bundles.*https://quay.io/.* status code 404 Not Found"), + ("Couldnt get task via buldles resolver from quay.io due to 429", r"Message:.*Couldn't retrieve Task .*resolver type bundles.*https://quay.io/.* status code 429 Too Many Requests"), + ("Couldnt get task via buldles resolver from quay.io due to manifest unknown", r"Build Pipeline Run failed run: PipelineRun for component [^ ]+ in namespace [^ ]+ failed: .* Reason:CouldntGetTask Message:Pipeline [^ ]+ can't be Run; it contains Tasks that don't exist: Couldn't retrieve Task .resolver type bundles.*name = .* cannot retrieve the oci image: GET https://quay.io/[^ ]+: MANIFEST_UNKNOWN: manifest unknown"), + ("Couldnt get task via bundles resolver because control characters in yaml", r"Build Pipeline Run failed run: PipelineRun for component [^ ]+ in namespace [^ ]+ failed: .* Reason:CouldntGetTask Message:Pipeline [^ ]+ can't be Run; it contains Tasks that don't exist: Couldn't retrieve Task .resolver type bundles.*name = .* invalid runtime object: yaml: control characters are not allowed"), + ("Couldnt get task via bundles resolver from quay.io due to digest mismatch", r"Build Pipeline Run failed run: PipelineRun for component [^ ]+ in namespace [^ ]+ failed: .* Reason:CouldntGetTask Message:Pipeline [^ ]+ can't be Run; it contains Tasks that don't exist: Couldn't 
retrieve Task .resolver type bundles.*name = .*: error requesting remote resource: error getting \"bundleresolver\" .*: cannot retrieve the oci image: manifest digest: [^ ]+ does not match requested digest: [^ ]+ for .quay.io/"), + ("Couldnt get task via bundles resolver from quay.io due to unexpected end of JSON input", r"Build Pipeline Run failed run: PipelineRun for component .* in namespace .* failed: .* Reason:CouldntGetTask Message:Pipeline .* can't be Run; it contains Tasks that don't exist: Couldn't retrieve Task .resolver type bundles.*name = .*: error requesting remote resource: error getting \"bundleresolver\" .*: cannot retrieve the oci image: unexpected end of JSON input"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Message:.*Couldn't retrieve Task .*resolver type git.*https://gitlab.cee.redhat.com/.* status code: 429"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Reason:CouldntGetTask Message:.*Couldn't retrieve Task .resolver type git.*https://gitlab.cee.redhat.com/.* error requesting remote resource: error getting .Git. .*: error resolving repository: git clone error: Cloning into .* error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429 fatal: expected 'packfile': exit status 128"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Reason:CouldntGetTask Message:.*Couldn't retrieve Task .resolver type git.*https://gitlab.cee.redhat.com/.* error requesting remote resource: error getting .Git. .*: error resolving repository: git clone error: Cloning into .* remote: Retry later fatal: unable to access 'https://gitlab.cee.redhat.com/.*': The requested URL returned error: 429: exit status 128"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Reason:CouldntGetTask Message:.*Couldn't retrieve Task .resolver type git.*https://gitlab.cee.redhat.com/.* error requesting remote resource: error getting .Git. 
.*: git fetch error: error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Reason:CouldntGetTask Message:.*Couldn't retrieve Task .resolver type git.*https://gitlab.cee.redhat.com/.* error requesting remote resource: error getting .Git. .*: git clone error: Cloning into .* error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Reason:CouldntGetTask Message:.*Couldn't retrieve Task .resolver type git.*https://gitlab.cee.redhat.com/.* error requesting remote resource: error getting .Git. .*: git fetch error: error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429 fatal: expected 'acknowledgments': exit status 128"), + ("Couldnt get task via git resolver from gitlab.cee due to 429", r"Reason:CouldntGetTask Message:.*Couldn't retrieve Task .resolver type git.*https://gitlab.cee.redhat.com/.* error requesting remote resource: error getting .Git. 
.*: git fetch error: remote: Retry later fatal: unable to access .*: The requested URL returned error: 429: exit status 128"), + ("Couldnt get task via http resolver from gitlab.cee", r"Message:.*Couldn't retrieve Task .*resolver type http.*error getting.*requested URL .*https://gitlab.cee.redhat.com/.* is not found"), + ("Error deleting on-pull-request default PipelineRun", r"Repo-templating workflow component cleanup failed: Error deleting on-pull-request default PipelineRun in namespace .*: Unable to list PipelineRuns for component .* in namespace .*: context deadline exceeded"), + ("Error updating .tekton file in gitlab.cee.redhat.com", r"Repo-templating workflow component cleanup failed: Error templating PaC files: Failed to update file .tekton/[^ ]+ in repo .*: Failed to update/create file: PUT https://gitlab.cee.redhat.com/api/v4/projects/[^ ]+/repository/files/.tekton/.*: 400 .message: A file with this name doesn't exist."), + ("Failed application creation when calling mapplication.kb.io webhook", r"Application failed creation: Unable to create the Application .*: Internal error occurred: failed calling webhook .*mapplication.kb.io.*: failed to call webhook: Post .*https://application-service-webhook-service.application-service.svc:443/mutate-appstudio-redhat-com-v1alpha1-application.* no endpoints available for service .*application-service-webhook-service"), + ("Failed component creation because it already exists", r"Component failed creation: Unable to create the Component [^ ]+: components.appstudio.redhat.com \"[^ ]+\" already exists"), + ("Failed component creation because resource quota evaluation timed out", r"Component failed creation: Unable to create the Component .*: Internal error occurred: resource quota evaluation timed out"), + ("Failed component creation when calling mcomponent.kb.io webhook", r"Component failed creation: Unable to create the Component .*: Internal error occurred: failed calling webhook .*mcomponent.kb.io.*: failed to call 
webhook: Post .*https://application-service-webhook-service.application-service.svc:443/mutate-appstudio-redhat-com-v1alpha1-component.* no endpoints available for service .*application-service-webhook-service.*"), + ("Failed creating integration test scenario because admission webhook dintegrationtestscenario.kb.io could not find application", r"Integration test scenario failed creation: Unable to create the Integration Test Scenario [^ ]+: admission webhook \"dintegrationtestscenario.kb.io\" denied the request: could not find application '[^ ]+' in namespace '[^ ]+'"), + ("Failed creating integration test scenario because cannot set blockOwnerDeletion if an ownerReference refers to a resource you can't set finalizers on", r"Integration test scenario failed creation: Unable to create the Integration Test Scenario .* integrationtestscenarios.appstudio.redhat.com .* is forbidden: cannot set blockOwnerDeletion if an ownerReference refers to a resource you can't set finalizers on"), + ("Failed creating integration test scenario because it already exists", r"Integration test scenario failed creation: Unable to create the Integration Test Scenario .* integrationtestscenarios.appstudio.redhat.com .* already exists"), + ("Failed creating integration test scenario because of timeout", r"Integration test scenario failed creation: Unable to create the Integration Test Scenario [^ ]+ in namespace jhutar-tenant: context deadline exceeded"), + ("Failed getting PaC pull number because PaC public route does not exist", r"Component failed validation: Unable to get PaC pull number for component .* in namespace .*: PaC component .* in namespace .* failed on PR annotation: Incorrect state: .*\"error-message\":\"52: Pipelines as Code public route does not exist\""), + ("Failed Integration test scenario when calling dintegrationtestscenario.kb.io webhook", r"Integration test scenario failed creation: Unable to create the Integration Test Scenario .*: Internal error occurred: failed 
calling webhook .*dintegrationtestscenario.kb.io.*: failed to call webhook: Post .*https://integration-service-webhook-service.integration-service.svc:443/mutate-appstudio-redhat-com-v1beta2-integrationtestscenario.*: no endpoints available for service .*integration-service-webhook-service"), + ("Failed to add imagePullSecrets to build SA", r"Failed to configure pipeline imagePullSecrets: Unable to add secret .* to service account build-pipeline-.*: context deadline exceeded"), + ("Failed to git fetch from gitlab.cee due to connectivity issues", r"Error running git .fetch.*: exit status 128.*remote: Retry later.*fatal: unable to access 'https://gitlab.cee.redhat.com/[^ ]+': The requested URL returned error: 429.*Error fetching git repository: failed to fetch [^ ]+: exit status 128"), + ("Failed to link pipeline image pull secret to build service account because SA was not found", r"Failed to configure pipeline imagePullSecrets: Unable to add secret .* to service account .*: serviceaccounts .* not found"), + ("Failed to merge MR on CEE GitLab due to 405", r"Repo-templating workflow component cleanup failed: Merging [0-9]+ failed: [Pp][Uu][Tt] .*https://gitlab.cee.redhat.com/api/.*/merge_requests/[0-9]+/merge.*message: 405 Method Not Allowed"), + ("Failed to merge MR on CEE GitLab due to DNS error", r"Repo-templating workflow component cleanup failed: Merging [0-9]+ failed: [Pp][Uu][Tt] .*https://gitlab.cee.redhat.com/api/.*/merge_requests/[0-9]+/merge.*Temporary failure in name resolution"), + ("Failed validating release condition", r"Release .* in namespace .* failed: .*Message:Release validation failed.*"), + ("GitLab token used by test expired", r"Repo forking failed: Error deleting project .*: DELETE https://gitlab.cee.redhat.com/.*: 401 .*error: invalid_token.*error_description: Token is expired. 
You can either do re-authorization or token refresh"), + ("Pipeline failed", r"Build Pipeline Run failed run:.*Message:Tasks Completed: [0-9]+ \(Failed: [1-9]+,"), + ("Post-test data collection failed", r"Failed to collect application JSONs"), + ("Post-test data collection failed", r"Failed to collect pipeline run JSONs"), + ("Post-test data collection failed", r"Failed to collect release related JSONs"), + ("Release failed in progress without error given", r"Release failed: Release .* in namespace .* failed: .Type:Released Status:False .* Reason:Progressing Message:.$"), + ("Release failure: PipelineRun not created", r"couldn't find PipelineRun in managed namespace '%s' for a release '%s' in '%s' namespace"), + ("Release Pipeline failed", r"Release pipeline run failed:.*Message:Tasks Completed: [0-9]+ \(Failed: [1-9]+,"), + ("Repo forking failed as GitLab CEE says 401 Unauthorized", r"Repo forking failed: Error deleting project .*: DELETE https://gitlab.cee.redhat.com/.*: 401 .*message: 401 Unauthorized.*"), + ("Repo forking failed as GitLab CEE says 405 Method Not Allowed", r"Repo forking failed: Error deleting project [^ ]+: DELETE https://gitlab.cee.redhat.com/[^ ]+: 405 .message: Non GET methods are not allowed for moved projects."), + ("Repo forking failed as GitLab CEE says 500 Internal Server Error", r"Repo forking failed: Error deleting project .*: GET https://gitlab.cee.redhat.com/.*: 500 failed to parse unknown error format.*500: We're sorry, something went wrong on our end"), + ("Repo forking failed as the target is still being deleted", r"Repo forking failed: Error forking project .* POST https://gitlab.cee.redhat.com.* 409 .*Project namespace name has already been taken, The project is still being deleted"), + ("Repo forking failed as we got TLS handshake timeout talking to GitLab CEE", r"Repo forking failed: Error deleting project .*: Delete \"https://gitlab.cee.redhat.com/api/v4/projects/.*\": net/http: TLS handshake timeout"), + ("Repo forking 
failed as we got TLS handshake timeout talking to GitLab CEE", r"Repo forking failed: Error getting project [^ ]+: Get \"https://gitlab.cee.redhat.com/api/v4/projects/.*\": net/http: TLS handshake timeout"), + ("Repo forking failed because gitlab.com returned 503", r"Repo forking failed: Error checking repository .*: GET https://api.github.com/repos/.*: 503 No server is currently available to service your request. Sorry about that. Please try resubmitting your request and contact us if the problem persists.*"), + ("Repo forking failed because import failed", r"Repo forking failed: Error waiting for project [^ ]+ .ID: [0-9]+. fork to complete: Forking of project [^ ]+ .ID: [0-9]+. failed with import status: failed"), + ("Repo forking failed when deleting target repo on github.com because 504", r"Repo forking failed: Error deleting repository .*: DELETE https://api.github.com/repos/.*: 504 We couldn't respond to your request in time. Sorry about that. Please try resubmitting your request and contact us if the problem persists."), + ("Repo forking failed when deleting target repo on gitlab.com (not CEE!) 
due unathorized", r"Repo forking failed: Error deleting project .* DELETE https://gitlab.com/.* 401 .* Unauthorized"), + ("Repo templating failed when updating file on github.com because 500", r"Repo-templating workflow component cleanup failed: Error templating PaC files: Failed to update file .tekton/[^ ]+.yaml in repo [^ ]+ revision main: error when updating a file on github: PUT https://api.github.com/repos/[^ ]+: 500"), + ("Repo templating failed when updating file on github.com because 502", r"Repo-templating workflow component cleanup failed: Error templating PaC files: Failed to update file .tekton/[^ ]+.yaml in repo [^ ]+ revision main: error when updating a file on github: PUT https://api.github.com/repos/[^ ]+: 502 Server Error"), + ("Repo templating failed when updating file on github.com because 504", r"Repo-templating workflow component cleanup failed: Error templating PaC files: Failed to update file .tekton/[^ ]+.yaml in repo [^ ]+ revision main: error when updating a file on github: PUT https://api.github.com/repos/[^ ]+: 504 We couldn't respond to your request in time. Sorry about that. 
Please try resubmitting your request and contact us if the problem persists."), + ("Test Pipeline failed", r"Test Pipeline Run failed run:.*Message:Tasks Completed: [0-9]+ \(Failed: [1-9]+,"), + ("Timeout creating application calling mapplication.kb.io webhook", r"Application failed creation: Unable to create the Application [^ ]+: Internal error occurred: failed calling webhook .mapplication.kb.io.: failed to call webhook: Post .https://application-service-webhook-service.application-service.svc:443/mutate-appstudio-redhat-com-v1alpha1-application[^ ]+.: context deadline exceeded"), + ("Timeout forking the repo before the actual test", r"Repo forking failed: context deadline exceeded"), + ("Timeout forking the repo before the actual test", r"Repo forking failed: Error forking project .*: context deadline exceeded"), + ("Timeout forking the repo before the actual test", r"Repo forking failed: Error waiting for project [^ ]+ .ID: [0-9]+. fork to complete: context deadline exceeded"), + ("Timeout getting build service account", r"Component build SA not present: Component build SA .* not present: context deadline exceeded"), + ("Timeout getting PaC pull number when validating component", r"Component failed validation: Unable to get PaC pull number for component .* in namespace .*: context deadline exceeded"), + ("Timeout getting pipeline", r"Message:.*resolver failed to get Pipeline.*resolution took longer than global timeout of .*"), + ("Timeout getting task via git resolver from gitlab.cee", r"Message:.*Couldn't retrieve Task .*resolver type git.*https://gitlab.cee.redhat.com/.* resolution took longer than global timeout of .*"), + # Last time I seen this we discussed it here: + # + # https://redhat-internal.slack.com/archives/C04PZ7H0VA8/p1751530663606749 + # + # And it manifested itself by check on initial PR failing with: + # + # the namespace of the provided object does not match the namespace sent on the request + # + # And folks noticed this in the PaC 
controller logs: + # + # There was an error starting the PipelineRun test-rhtap-1-app-ryliu-comp-0-on-pull-request-, creating pipelinerun + # test-rhtap-1-app-ryliu-comp-0-on-pull-request- in namespace test-rhtap-1-tenant has failed. Tekton Controller has + # reported this error: ```Internal error occurred: failed calling webhook "vpipelineruns.konflux-ci.dev": failed + # to call webhook: Post "https://etcd-shield.etcd-shield.svc:443/validate-tekton-dev-v1-pipelinerun?timeout=10s": + # context deadline exceeded``` + ("Timeout listing pipeline runs", r"Repo-templating workflow component cleanup failed: Error deleting on-pull-request default PipelineRun in namespace .*: Unable to list PipelineRuns for component .* in namespace .*: context deadline exceeded"), + ("Timeout listing pipeline runs", r"Repo-templating workflow component cleanup failed: Error deleting on-push merged PipelineRun in namespace .*: Unable to list PipelineRuns for component .* in namespace .*: context deadline exceeded"), + ("Timeout onboarding component", r"Component failed onboarding: context deadline exceeded"), + ("Timeout waiting for build pipeline to be created", r"Build Pipeline Run failed creation: context deadline exceeded"), + ("Timeout waiting for integration test scenario to validate", r"Integration test scenario failed validation: context deadline exceeded"), + ("Timeout waiting for release pipeline to be created", r"Release pipeline run failed creation: context deadline exceeded"), + ("Timeout waiting for snapshot to be created", r"Snapshot failed creation: context deadline exceeded"), + ("Timeout waiting for test pipeline to create", r"Test Pipeline Run failed creation: context deadline exceeded"), + ("Timeout waiting for test pipeline to finish", r"Test Pipeline Run failed run: context deadline exceeded"), + ("Unable to connect to server", r"Error: Unable to connect to server"), +} + +# Generic guideline on constructing error reasons: +FAILED_PLR_ERRORS = { + ("SKIP", r"Skipping 
step because a previous step failed"), # This is a special "wildcard" error, let's keep it on top and do not change "SKIP" reason as it is used in the code + ("Bad Gateway when pulling container image from quay.io", r"Error: initializing source docker://quay.io/[^ ]+: reading manifest [^ ]+ in quay.io/[^ ]+: received unexpected HTTP status: 502 Bad Gateway "), + ("buildah build failed to pull container from registry.access.redhat.com because digest mismatch", r"buildah build.*FROM registry.access.redhat.com/[^ ]+ Trying to pull registry.access.redhat.com/[^ ]+ Error: creating build container: internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+: copying system image from manifest list: parsing image configuration: Download config.json digest [^ ]+ does not match expected [^ ]+"), + ("buildah build failed to pull container from registry.access.redhat.com because of 403", r"Error: creating build container: internal error: unable to copy from source docker://registry.access.redhat.com/.*: copying system image from manifest list: determining manifest MIME type for docker://registry.access.redhat.com/.*: reading manifest .* in registry.access.redhat.com/.*: StatusCode: 403"), + ("buildah build failed to pull container from registry.access.redhat.com because of 500 Internal Server Error", r"buildah build.*FROM registry.access.redhat.com/[^ ]+ Trying to pull registry.access.redhat.com/[^ ]+ Getting image source signatures Error: creating build container: internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+ copying system image from manifest list: reading signatures: reading signature from https://access.redhat.com/[^ ]+ received unexpected HTTP status: 500 Internal Server Error"), + ("buildah failed to pull image from Quay.io because unauthorized", r"Executing: unshare .* buildah pull .* Trying to pull quay.io/[^ ]+ Error: internal error: unable to copy from source docker://quay.io/[^ ]+: initializing source 
docker://quay.io/[^ ]+: reading manifest [^ ]+ in quay.io/[^ ]+: unauthorized: access to the requested resource is not authorized warning: Command failed and will retry, 1 try error: Unauthorized error, wrong registry credentials provided, won't retry Failed to pull base image quay.io/[^ ]+"), + ("Can not find chroot_scan.tar.gz file", r"tar: .*/chroot_scan.tar.gz: Cannot open: No such file or directory"), + ("Can not find Dockerfile", r"Cannot find Dockerfile [^ ]+"), + ("DNF failed to download repodata from Download Devel because could not resolve host", r"Errors during downloading metadata for repository '[^ ]+': - Curl error .6.: Couldn't resolve host name for http://download.devel.redhat.com/brewroot/repos/[^ ]+ .Could not resolve host: download\.devel\.redhat\.com."), + ("DNF failed to download repodata from Download Devel because timeout", r"dnf.exceptions.RepoError: Failed to download metadata for repo 'build': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried .* CRITICAL Error: Failed to download metadata for repo 'build': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried [^ ]+/mock/.*Failed to connect to download-[0-9]+.beak-[0-9]+.prod.iad2.dc.redhat.com"), + ("DNF failed to download repodata from Download Devel because timeout", r"dnf.exceptions.RepoError: Failed to download metadata for repo 'build': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried .* CRITICAL Error: Failed to download metadata for repo 'build': Cannot download repomd.xml: Cannot download repodata/repomd.xml: All mirrors were tried .*/mock/.*Failed to connect to download.devel.redhat.com"), + ("DNF failed to download repodata from Koji", r"ERROR Command returned error: Failed to download metadata (baseurl: \"https://kojipkgs.fedoraproject.org/repos/[^ ]*\") for repository \"build\": Usable URL not found"), + ("DNF failed to install package because GPG check failed", r"dnf 
-y install .* is not signed.*Error: GPG check FAILED.*exit status 1"), + ("Enterprise contract results failed validation", r"^false *$"), + ("Error allocating host as provision TR already exists", r"Error allocating host: taskruns.tekton.dev \".*provision\" already exists"), + ("Error allocating host because of insufficient free addresses in subnet", r"Error allocating host: failed to launch EC2 instance for .* operation error EC2: RunInstances, https response error StatusCode: 400, RequestID: .*, api error InsufficientFreeAddressesInSubnet: There are not enough free addresses in subnet .* to satisfy the requested number of instances."), + ("Error allocating host because of provisioning error", r"Error allocating host: failed to provision host"), + ("Failed because CPU is not x86-64-v4", r"ERROR: CPU is not x86-64-v4, aborting build."), + ("Failed because of quay.io returned 502", r"level=fatal msg=.Error parsing image name .*docker://quay.io/.* Requesting bearer token: invalid status code from registry 502 .Bad Gateway."), + ("Failed because registry.access.redhat.com returned 503 when reading manifest", r"source-build:ERROR:command execution failure, status: 1, stderr: time=.* level=fatal msg=.Error parsing image name .* reading manifest .* in registry.access.redhat.com/.* received unexpected HTTP status: 503 Service Unavailable"), + ("Failed downloading rpms for hermetic builds due to 504 errors", r"mock-hermetic-repo.*urllib3.exceptions.MaxRetryError: HTTPSConnectionPool.*: Max retries exceeded with url: .*.rpm .Caused by ResponseError..too many 504 error responses..."), + ("Failed downloading rpms for hermetic builds", r"mock-hermetic-repo.*ERROR:__main__:RPM deps downloading failed"), + ("Failed reading signatures from access.redhat.com due to 500", r"internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+: copying system image from manifest list: reading signatures: reading signature from 
https://access.redhat.com/webassets/docker/content/sigstore/[^ ]+: received unexpected HTTP status: 500 Internal Server Error"), + ("Failed to compile with clang", r"ERROR: [^ ]+: Compiling [^ ]+.cc .for tool. failed: .Exit 1.: clang-[0-9]+ failed: error executing CppCompile command"), + ("Failed to compile with clang", r"ERROR: [^ ]+: Compiling [^ ]+.c failed: .Exit 1.: clang-[0-9]+ failed: error executing CppCompile command"), + ("Failed to connect to MPC VM", r"ssh: connect to host [0-9]+.[0-9]+.[0-9]+.[0-9]+ port 22: Connection timed out"), + ("Failed to prefetch dependencies due to download timeout", r"ERROR Unsuccessful download: .* ERROR FetchError: exception_name: TimeoutError.*If the issue seems to be on the cachi2 side, please contact the maintainers."), + ("Failed to prefetch dependencies due to go env error", r"ERROR PackageManagerError: Go execution failed: .go env GOWORK. failed with rc=1"), + ("Failed to prefetch dependencies due to invalid input when fetching tags", r"Executing: git fetch --tags.*ERROR InvalidInput: 1 validation error for user input.*packages.*Value error, package path does not exist .or is not a directory.:"), + ("Failed to prefetch dependencies due to subscription-manager failed to register because system is already registered", r"Executing: subscription-manager register --org [^ ]+ --activationkey [^ ]+ This system is already registered. 
Use --force to override error: Command failed after [0-9]+ tries with status 64 Subscription-manager register failed"), + ("Failed to provision MPC VM due to resource quota evaluation timed out", r"cat /ssh/error Error allocating host: Internal error occurred: resource quota evaluation timed out"), # KONFLUX-9798 + ("Failed to pull container from access.redhat.com because of DNS error", r"Error: creating build container: internal error: unable to copy from source docker://registry.access.redhat.com/.*: copying system image from manifest list: reading signatures: Get \"https://access.redhat.com/.*\": dial tcp: lookup access.redhat.com: Temporary failure in name resolution"), + ("Failed to pull container from quay.io because of DNS error", r"Error: copying system image from manifest list: reading blob .*: Get \"https://cdn[0-9]+.quay.io/.*\": dial tcp: lookup cdn[0-9]+.quay.io: Temporary failure in name resolution"), + ("Failed to pull container from quay.io due to 404", r"Error response from registry: recognizable error message not found: PUT .https://quay.io/[^ ]+.: response status code 404: Not Found Command exited with non-zero status 1"), + ("Failed to pull container from registry.access.redhat.com because of 500 Internal Server Error", r"Trying to pull registry.access.redhat.com/[^ ]+ Getting image source signatures Error: copying system image from manifest list: reading signatures: reading signature from https://access.redhat.com/[^ ]+: status 500 .Internal Server Error."), + ("Failed to pull container from registry.access.redhat.com because of DNS error", r"Error: initializing source docker://registry.access.redhat.com/.* pinging container registry registry.access.redhat.com: Get \"https://registry.access.redhat.com/v2/\": dial tcp: lookup registry.access.redhat.com: Temporary failure in name resolution"), + ("Failed to pull container from registry.access.redhat.com because of remote tls error", r"Error: creating build container: internal error: unable to 
copy from source docker://registry.access.redhat.com/[^ ]+ copying system image from manifest list: reading blob [^ ]+: Get .https://cdn[0-9]+.quay.io/[^ ]+ remote error: tls: internal error"), + ("Failed to pull container from registry.access.redhat.com because of remote tls error", r"Trying to pull registry.access.redhat.com/[^ ]+ Error: copying system image from manifest list: parsing image configuration: Get .https://cdn[0-9]+.quay.io/[^ ]+ remote error: tls: internal error"), + ("Failed to pull container from registry.access.redhat.com because of unauthorized", r"Error: creating build container: internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+: initializing source docker://registry.access.redhat.com/[^ ]+: unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials."), + ("Failed to pull container from registry.access.redhat.com because of unauthorized", r"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. .* subprocess.CalledProcessError: Command ...podman....pull.*registry.access.redhat.com/.* returned non-zero exit status 125"), + ("Failed to pull container from registry.access.redhat.com because of unauthorized", r"unable to retrieve auth token: invalid username/password: unauthorized: Please login to the Red Hat Registry using your Customer Portal credentials. 
.* subprocess.CalledProcessError: Command ...skopeo....inspect.*docker://registry.access.redhat.com/.* returned non-zero exit status 1"), + ("Failed to pull container from registry.fedoraproject.org", r"Error: internal error: unable to copy from source docker://registry.fedoraproject.org/[^ ]+: initializing source docker://registry.fedoraproject.org/[^ ]+: pinging container registry registry.fedoraproject.org: Get \"https://registry.fedoraproject.org/v2/\": dial tcp [^ ]+: connect: connection refused"), + ("Failed to push SBOM to quay.io", r"Uploading SBOM file for [^ ]+ to [^ ]+ with mediaType [^ ]+. Error: Get .https://quay.io/v2/.: dial tcp .[0-9a-f:]+.:443: connect: network is unreachable [^ ]+: error during command execution: Get .https://quay.io/v2/.: dial tcp .[0-9a-f:]+.:443: connect: network is unreachable"), + ("Failed to push SBOM to quay.io", r"Uploading SBOM file for [^ ]+ to [^ ]+ with mediaType [^ ]+. Error: PUT https://quay.io/v2/[^ ]+: unexpected status code 200 OK [^ ]+: error during command execution: PUT https://quay.io/v2/[^ ]+: unexpected status code 200 OK"), + ("Failed to push to quai.io due to 404", r"Error response from registry: recognizable error message not found: PUT \"https://quay.io/[^ ]+\": response status code 404"), + ("Failed to ssh to remote MPC VM", r"[^ ]+@[0-9.]+: Permission denied .publickey,gssapi-keyex,gssapi-with-mic..\s*$"), # KONFLUX-9742 + ("Gateway Time-out when pulling container image from quay.io", r"Error: initializing source docker://quay.io/[^ ]+: reading manifest [^ ]+ in quay.io/[^ ]+: received unexpected HTTP status: 504 Gateway Time-out"), + ("Gateway Time-out when pulling container image", r"Error: copying system image from manifest list: parsing image configuration: fetching blob: received unexpected HTTP status: 504 Gateway Time-out"), + ("Getting repo tags from quay.io failed because of 502 Bad Gateway", r"Error determining repository tags: pinging container registry quay.io: received unexpected HTTP 
status: 502 Bad Gateway"), + ("Git failed to clone submodule because GitLab CEE giving 429", r"Error running git .*: exit status 1.*error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429.*fatal: clone of 'https://gitlab.cee.redhat.com/[^ ]+' into submodule path '[^ ]+' failed"), + ("Git failed to clone submodule because GitLab CEE giving 429", r"Error running git .*: exit status 1.*error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429.*fatal: Fetched in submodule path '[^ ]+', but it did not contain [^ ]+. Direct fetching of that commit failed"), + ("Git failed to fetch because GitLab CEE giving 429", r"Error running git .*: exit status 128.*error: RPC failed; HTTP 429 curl 22 The requested URL returned error: 429.*Error fetching git repository: failed to fetch [^ ]+: exit status 128"), + ("Git failed to fetch because GitLab CEE giving 429", r"Error running git .*: exit status 1.*remote: Retry later.*fatal: unable to access 'https://gitlab.cee.redhat.com/.*': The requested URL returned error: 429.*Error fetching git repository: failed to fetch [^ ]+: exit status 1"), + ("Git failed to fetch because GitLab CEE giving 429", r"Error running git .*: exit status 1.*remote: Retry later.*fatal: unable to access 'https://gitlab.cee.redhat.com/.*': The requested URL returned error: 429.*Error fetching git repository: exit status 1"), + ("Go failed installation because it was killed", r"Run buildah build .* go install [^ ]+ subprocess exited on killed subprocess exited with status 1"), + ("Introspection failed because of incomplete .docker/config.json", r".* level=fatal msg=\"Error parsing image name .*: getting username and password: reading JSON file .*/tekton/home/.docker/config.json.*: unmarshaling JSON at .*: unexpected end of JSON input\""), + ("Invalid reference when processing SBOM", r"SBOM .* error during command execution: could not parse reference: quay.io/[^ ]+"), + ("No podman installed on a MPC VM", r"remote_cmd podman 
unshare setfacl .* \+ ssh -o StrictHostKeyChecking=no [^ ]+ podman unshare setfacl .* bash: line 1: podman: command not found"), # KONFLUX-9944 + ("oras failed to fetch blob from Quay.io because it was terminated", r"Executing: oras blob fetch --registry-config [^ ]+ quay.io/[^ ]+ --output [^ ]+ Terminated"), + ("Prefetch dependencies failed to download from rhsm-pulp.corp.redhat.com because not whole content was fetched", r"Reading RPM lockfile: [^ ]+ .* Unsuccessful download: https://rhsm-pulp.corp.redhat.com/content/[^ ]+ .* ERROR FetchError: exception_name: ClientPayloadError, details: Response payload is not completed: .ContentLengthError: 400, message='Not enough data to satisfy content length header.'."), + ("Prefetch dependencies failed to download from download.devel.redhat.com because of timeout", r"Reading RPM lockfile: [^ ]+ .* Unsuccessful download: https://download.devel.redhat.com/[^ ]+ .* ERROR FetchError: exception_name: TimeoutError, details: Error: FetchError: exception_name: TimeoutError,"), + ("Release failed because unauthorized when pulling policy", r"Error: pulling policy: GET .https://quay.io/v2/konflux-ci/konflux-vanguard/data-acceptable-bundles/blobs/sha256:[0-9a-z]+.: response status code 401: Unauthorized"), + ("Release failed because unauthorized when pushing artifact", r"Prepared artifact from /var/workdir/release .* Token not found for quay.io/konflux-ci/release-service-trusted-artifacts Uploading [0-9a-z]+ sourceDataArtifact Error response from registry: unauthorized: access to the requested resource is not authorized: map.. Command exited with non-zero status 1"), + ("RPM build failed: bool cannot be defined via typedef", r"error: .bool. 
cannot be defined via .typedef..*error: Bad exit status from /var/tmp/rpm-tmp..* ..build."), + ("Script gather-rpms.py failed because of too many values to unpack", r"Handling archdir [^ ]+ Traceback.*File \"/usr/bin/gather-rpms.py\".*nvr, btime, size, sigmd5, _ = .*ValueError: too many values to unpack"), + ("Script merge_catalogs.sh due to permission error", r"RUN ./merge_catalogs.sh /bin/sh: line 1: ./merge_catalogs.sh: Permission denied subprocess exited with status 126 subprocess exited with status 126"), + ("Script mock-hermetic-repo failed because pull from registry.access.redhat.com failed", r"Error: internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+: determining manifest MIME type for docker://registry.access.redhat.com/[^ ]+: Manifest does not match provided manifest digest [^ ]+.*/usr/bin/mock-hermetic-repo.*subprocess.CalledProcessError.*Command ...podman....pull.* returned non-zero exit status 125"), + ("Script mock-hermetic-repo failed because pull from registry.access.redhat.com failed", r"mock-hermetic-repo.*Error: internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+: determining manifest MIME type for docker://registry.access.redhat.com/[^ ]+: Manifest does not match provided manifest digest.*subprocess.CalledProcessError.*Command ...podman....pull.* returned non-zero exit status 125"), + ("Script mock-hermetic-repo failed because pull from registry.access.redhat.com failed", r"/usr/bin/mock-hermetic-repo.*Error: internal error: unable to copy from source docker://registry.access.redhat.com/[^ ]+: initializing source docker://registry.access.redhat.com/[^ ]+: unable to retrieve auth token: invalid username/password: unauthorized.*subprocess.CalledProcessError.*Command '.'podman', 'pull', '--arch', '[^ ]+', 'registry.access.redhat.com/[^ ]+'.' 
returned non-zero exit status 125"), + ("Script opm failed because catalog is missing", r"Running opm alpha render-template basic -o yaml [^ ]+ . catalog/catalog-prod.yaml.*unable to open .catalog/[^ ]+.: open catalog/[^ ]+: no such file or directory"), + ("Script opm failed because catalog is missing", r"Running opm alpha render-template basic -o yaml [^ ]+ --migrate-level=bundle-object-to-csv-metadata . catalog/catalog-prod.yaml.*unable to open .catalog/[^ ]+.: open catalog/[^ ]+: no such file or directory"), + ("Script opm failed because failed to pull image from registry.redhat.io", r"Running opm alpha render-template basic -o yaml [^ ]+ . [^ ]+.*render reference .registry.redhat.io/[^ ]+.: failed to pull image .registry.redhat.io/[^ ]+.: Source image rejected: reading signature from https://registry.redhat.io/[^ ]+: received unexpected HTTP status: 500 Internal Server Error"), + ("Script opm failed to load or rebuild cache because some package has duplicate bundle", r"level=fatal msg=.failed to load or rebuild cache: failed to rebuild cache: build package index: process package [^ ]+: package [^ ]+ has duplicate bundle [^ ]+..*Error: building at STEP .RUN /bin/opm serve .*.: exit status 1"), + ("Script oras failed to fetch blob from Quay after 10 retries", r"Executing: oras blob fetch --registry-config [^ ]+ quay.io/[^ ]+ --output - error: Command failed after 10 tries with status 141"), + ("Script rpm_verifier failed to access image layer from quay.io because 502 Bad Gateway", r"rpm_verifier --image-url quay.io/.* Image: quay.io/.* error: unable to access the source layer sha256:[0-9a-z]+: received unexpected HTTP status: 502 Bad Gateway"), + ("Script rpm_verifier failed to pull image from quay.io because 502 Bad Gateway", r"rpm_verifier.*error: unable to read image quay.io/[^ ]+: Get .https://quay.io/[^ ]+.: received unexpected HTTP status: 502 Bad Gateway"), +} + +FAILED_TR_ERRORS = { + ("Missing expected fields in TaskRun", r"Missing expected fields in 
TaskRun"), # This is a special error, meaning everything failed basically + ("Missing expected TaskRun file", r"Missing expected TaskRun file"), # Another special error, meaning everything failed as well + ("SKIP", r"\"message\": \"All Steps have completed executing\""), # Another special error to avoid printing 'Unknown error:' message + ("SKIP", r"\"message\": \".* exited with code 1.*\""), # Another special error to avoid printing 'Unknown error:' message + ("SKIP", r"\"message\": \".* exited with code 255.*\""), # Another special error to avoid printing 'Unknown error:' message + ("Back-off pulling task run image from quay.io", r"the step .* in TaskRun .* failed to pull the image .*. The pod errored with the message: \\\"Back-off pulling image \\\"quay.io/.*"), + ("Back-off pulling task run image from registry.access.redhat.com", r"the step .* in TaskRun .* failed to pull the image .*. The pod errored with the message: \\\"Back-off pulling image \\\"registry.access.redhat.com/.*"), + ("Back-off pulling task run image from registry.redhat.io", r"the step .* in TaskRun .* failed to pull the image .*. The pod errored with the message: \\\"Back-off pulling image \\\"registry.redhat.io/.*"), + ("Build failed for unspecified reasons", r"build failed for unspecified reasons."), + ("Failed to create task run pod because ISE on webhook proxy.operator.tekton.dev", r"failed to create task run pod .*: Internal error occurred: failed calling webhook \\\"proxy.operator.tekton.dev\\\": failed to call webhook: Post \\\"https://tekton-operator-proxy-webhook.openshift-pipelines.svc:443/defaulting.timeout=10s\\\": context deadline exceeded.
Maybe missing or invalid Task .*"), + ("Not enough nodes to schedule pod", r".message.: .pod status ..PodScheduled..:..False..; message: ..[0-9/]+ nodes are available: .*: [0-9]+ Preemption is not helpful for scheduling."), + ("Pod creation failed because resource quota evaluation timed out", r".message.: .failed to create task run pod [^ ]+: Internal error occurred: resource quota evaluation timed out. Maybe missing or invalid Task [^ ]+., .reason.: .PodCreationFailed."), + ("Pod creation failed because serviceaccounts not found", r".message.: .failed to create task run pod [^ ]+: translating TaskSpec to Pod: serviceaccounts [^ ]+ not found. Maybe missing or invalid Task [^ ]+., .reason.: .PodCreationFailed."), + ("Pod creation failed with reason error", r"\"message\": \".* exited with code 2: Error\""), + ("Pod stuck in incorrect status", r".message.: .pod status ..PodReadyToStartContainers..:..False..; message: ....., .reason.: .Pending., .status.: .Unknown."), + ("TaskRun resolution failed because validation.webhook.pipeline.tekton.dev returned EOF", r".message.: .error requesting remote resource: error updating resource request [^ ]+ with data: Internal error occurred: failed calling webhook .*validation.webhook.pipeline.tekton.dev.*: failed to call webhook: Post .*https://tekton-pipelines-webhook.openshift-pipelines.svc:443/resource-validation.timeout=10s.*: EOF., .reason.: .TaskRunResolutionFailed."), + ("TaskRun was cancelled as its PipelineRun timeouted", r".message.: .TaskRun [^ ]+ was cancelled. TaskRun cancelled as the PipelineRun it belongs to has timed out.., .reason.: .TaskRunCancelled."), + ("TaskRun was cancelled as its PipelineRun was cancelled", r"TaskRun [^ ]+ was cancelled. 
TaskRun cancelled as the PipelineRun it belongs to has been cancelled."), + ("TaskRun was cancelled because it timeouted", r".message.: .TaskRun [^ ]+ failed to finish within [^ ]+., .reason.: .TaskRunTimeout."), +} + + +def message_to_reason(reasons_and_errors: set, msg: str) -> str: + """ + Classifies an error message using regular expressions and returns the error name. + + Args: + msg: The input error message string. + + Returns: + The name of the error if a pattern matches, otherwise string "UNKNOWN". + """ + msg = msg.replace("\n", " ") # Remove newlines + msg = msg[-250000:] # Just look at last 250k bytes + for error_name, pattern in reasons_and_errors: + if re.search(pattern, msg): + return error_name + print(f"Unknown error: {msg}") + return "UNKNOWN" + + +def add_reason(error_messages, error_by_code, error_by_reason, message, reason="", code=0): + if reason == "": + reason = message + error_messages.append(message) + error_by_code[code] += 1 + error_by_reason[reason] += 1 + + +def load(datafile): + if datafile.endswith(".yaml") or datafile.endswith(".yml"): + try: + with open(datafile, "r") as fd: + data = yaml.safe_load(fd) + except yaml.scanner.ScannerError: + raise Exception(f"File {datafile} is malfrmed YAML, skipping it") + elif datafile.endswith(".json"): + try: + with open(datafile, "r") as fp: + data = json.load(fp) + except json.decoder.JSONDecodeError: + raise Exception(f"File {datafile} is malfrmed JSON, skipping it") + else: + raise Exception("Unknown data file format") + + return data + + +def find_all_failed_plrs(data_dir): + for currentpath, folders, files in os.walk(data_dir): + for datafile in files: + if not datafile.startswith("collected-pipelinerun-"): + continue + + datafile = os.path.join(currentpath, datafile) + data = load(datafile) + + # Skip PLRs that did not failed + try: + succeeded = True + for c in data["status"]["conditions"]: + if c["type"] == "Succeeded": + if c["status"] == "False": # possibly switch this to `!= "True"` 
but that might be too big change for normal runs + succeeded = False + break + if succeeded: + continue + except KeyError: + continue + + yield data + + +def find_first_failed_build_plr(data_dir, plr_type): + """ This function is intended for jobs where we only run one concurrent + build, so no more than one can fail: our load test probes. + + This is executed when test hits "Pipeline failed" error and this is + first step to identify task that failed so we can identify error in + the pod log. + + It goes through given data directory (probably "collected-data/") and + loads all files named "collected-pipelinerun-*" and checks that PLR is + a "build" PLR and it is failed one. + """ + + for plr in find_all_failed_plrs(data_dir): + if plr_type == "build": + plr_type_label = "build" + elif plr_type == "release": + plr_type_label = "managed" + else: + raise Exception("Unknown PLR type") + + # Skip PLRs that do not have expected type + try: + if plr["metadata"]["labels"]["pipelines.appstudio.openshift.io/type"] != plr_type_label: + continue + except KeyError: + continue + + return plr + + +def find_trs(plr): + try: + for tr in plr["status"]["childReferences"]: + yield tr["name"] + except KeyError: + return + + +def check_failed_taskrun(data_dir, ns, tr_name): + datafile = os.path.join(data_dir, ns, "0", "collected-taskrun-" + tr_name + ".json") + try: + data = load(datafile) + except FileNotFoundError as e: + print(f"ERROR: Missing file: {str(e)}") + return False, "Missing expected TaskRun file" + + try: + pod_name = data["status"]["podName"] + for condition in data["status"]["conditions"]: + if condition["type"] == "Succeeded": + break + except KeyError: + return False, "Missing expected fields in TaskRun" + else: + if pod_name == "": + return False, json.dumps(condition, sort_keys=True) + else: + return True, json.dumps(condition, sort_keys=True) + + +def find_failed_containers(data_dir, ns, tr_name): + datafile = os.path.join(data_dir, ns, "0", "collected-taskrun-"
+ tr_name + ".json") + data = load(datafile) + + try: + pod_name = data["status"]["podName"] + for sr in data["status"]["steps"]: + if sr["terminated"]["exitCode"] == 0: + continue + if sr["terminated"]["reason"] == "TaskRunCancelled": + continue + yield (pod_name, sr["container"]) + except KeyError: + return + + +def load_container_log(data_dir, ns, pod_name, cont_name): + datafile = os.path.join(data_dir, ns, "0", "pod-" + pod_name + "-" + cont_name + ".log") + print(f"Checking errors in {datafile}") + with open(datafile, "r") as fd: + return fd.read() + + +def investigate_failed_plr(dump_dir, plr_type="build"): + reasons = [] + + try: + plr = find_first_failed_build_plr(dump_dir, plr_type) + if plr is None: + return ["SORRY PLR not found"] + + plr_ns = plr["metadata"]["namespace"] + + for tr_name in find_trs(plr): + tr_ok, tr_message = check_failed_taskrun(dump_dir, plr_ns, tr_name) + + if tr_ok: + try: + for pod_name, cont_name in find_failed_containers(dump_dir, plr_ns, tr_name): + log_lines = load_container_log(dump_dir, plr_ns, pod_name, cont_name) + reason = message_to_reason(FAILED_PLR_ERRORS, log_lines) + + if reason == "SKIP": + continue + + reasons.append(reason) + except FileNotFoundError as e: + print(f"Failed to locate required files: {e}") + + reason = message_to_reason(FAILED_TR_ERRORS, tr_message) + if reason != "SKIP": + reasons.append(reason) + except Exception as e: + logging.exception("Investigating PLR failed") + return ["SORRY " + str(e)] + + reasons = list(set(reasons)) # get unique reasons only + reasons.sort() # sort reasons + return reasons + + +def main(): + input_file = sys.argv[1] + timings_file = sys.argv[2] + output_file = sys.argv[3] + dump_dir = sys.argv[4] + + error_messages = [] # list of error messages + error_by_code = collections.defaultdict( + lambda: 0 + ) # key: numeric error code, value: number of such errors + error_by_reason = collections.defaultdict( + lambda: 0 + ) # key: textual error reason, value: number of such 
errors + + try: + with open(input_file, "r") as fp: + csvreader = csv.reader(fp) + for row in csvreader: + if row == []: + continue + + code = row[COLUMN_CODE] + message = row[COLUMN_MESSAGE] + + reason = message_to_reason(ERRORS, message) + + if reason == "Pipeline failed": + reasons2 = investigate_failed_plr(dump_dir, "build") + reason = reason + ": " + ", ".join(reasons2) + + if reason == "Release Pipeline failed": + reasons2 = investigate_failed_plr(dump_dir, "release") + reason = reason + ": " + ", ".join(reasons2) + + add_reason(error_messages, error_by_code, error_by_reason, message, reason, code) + except FileNotFoundError: + print("No errors file found, good :-D") + + timings = {} + try: + with open(timings_file, "r") as fp: + timings = json.load(fp) + except FileNotFoundError: + print("No timings file found, strange :-/") + error_messages.append("No timings file found") + add_reason(error_messages, error_by_code, error_by_reason, "No timings file found") + + try: + if timings["KPI"]["mean"] == -1: + if len(error_messages) == 0: + add_reason(error_messages, error_by_code, error_by_reason, "No test run finished") + except KeyError: + print("No KPI metrics in timings data, strange :-(") + add_reason(error_messages, error_by_code, error_by_reason, "No KPI metrics in timings data") + + data = { + "error_by_code": error_by_code, + "error_by_reason": error_by_reason, + "error_reasons_simple": "; ".join([f"{v}x {k}" for k, v in error_by_reason.items() if k != "Post-test data collection failed"]), + "error_messages": error_messages, + } + + print(f"Errors detected: {len(error_messages)}") + print("Errors by reason:") + for k, v in error_by_reason.items(): + print(f" {v}x {k}") + + with open(output_file, "w") as fp: + json.dump(data, fp, indent=4) + print(f"Data dumped to {output_file}") + + +def investigate_all_failed_plr(dump_dir): + reasons = [] + + for plr in find_all_failed_plrs(dump_dir): + plr_ns = plr["metadata"]["namespace"] + + for tr_name in 
find_trs(plr): + tr_ok, tr_message = check_failed_taskrun(dump_dir, plr_ns, tr_name) + + if tr_ok: + try: + for pod_name, cont_name in find_failed_containers(dump_dir, plr_ns, tr_name): + log_lines = load_container_log(dump_dir, plr_ns, pod_name, cont_name) + reason = message_to_reason(FAILED_PLR_ERRORS, log_lines) + + if reason == "SKIP": + continue + + reasons.append(reason) + except FileNotFoundError as e: + print(f"Failed to locate required files: {e}") + + reason = message_to_reason(FAILED_TR_ERRORS, tr_message) + if reason != "SKIP": + reasons.append(reason) + + return sorted(reasons) + + +def main_custom(): + dump_dir = sys.argv[1] + output_file = os.path.join(dump_dir, "errors-output.json") + + error_messages = [] # list of error messages + error_by_code = collections.defaultdict( + lambda: 0 + ) # key: numeric error code, value: number of such errors + error_by_reason = collections.defaultdict( + lambda: 0 + ) # key: textual error reason, value: number of such errors + + reasons = investigate_all_failed_plr(dump_dir) + for r in reasons: + add_reason(error_messages, error_by_code, error_by_reason, r) + + data = { + "error_by_code": error_by_code, + "error_by_reason": error_by_reason, + "error_reasons_simple": "; ".join([f"{v}x {k}" for k, v in error_by_reason.items() if k != "Post-test data collection failed"]), + "error_messages": error_messages, + } + + print(f"Errors detected: {len(error_messages)}") + print("Errors by reason:") + for k, v in error_by_reason.items(): + print(f" {v}x {k}") + + with open(output_file, "w") as fp: + json.dump(data, fp, indent=4) + print(f"Data dumped to {output_file}") + + +if __name__ == "__main__": + if len(sys.argv) == 2: + # When examining just custom collected-data directory + sys.exit(main_custom()) + else: + sys.exit(main()) diff --git a/tests/load-tests/evaluate.py b/tests/load-tests/evaluate.py index 03d3f1d977..a643de696a 100755 --- a/tests/load-tests/evaluate.py +++ b/tests/load-tests/evaluate.py @@ -11,10 +11,14 
@@ # Column indexes in input data COLUMN_WHEN = 0 -COLUMN_METRIC = 1 -COLUMN_DURATION = 2 -COLUMN_PARAMS = 3 -COLUMN_ERROR = 4 +COLUMN_PER_USER_T = 1 +COLUMN_PER_APP_T = 2 +COLUMN_PER_COMP_T = 3 +COLUMN_REPEATS_COUNTER = 4 +COLUMN_METRIC = 5 +COLUMN_DURATION = 6 +COLUMN_PARAMS = 7 +COLUMN_ERROR = 8 # Metrics we care about that together form KPI metric duration METRICS = [ @@ -23,14 +27,108 @@ "validateApplication", "createIntegrationTestScenario", "createComponent", + "getPaCPullNumber", + "validateComponent", "validatePipelineRunCreation", "validatePipelineRunCondition", "validatePipelineRunSignature", "validateSnapshotCreation", "validateTestPipelineRunCreation", "validateTestPipelineRunCondition", + "createReleasePlan", + "createReleasePlanAdmission", + "validateReleasePlan", + "validateReleasePlanAdmission", + "validateReleaseCreation", + "validateReleasePipelineRunCreation", + "validateReleasePipelineRunCondition", + "validateReleaseCondition", ] +# These metrics will be ignored if running on non-CI cluster +METRICS_CI = [ + "HandleUser", +] + +# These metrics will be ignored if ITS was skipped +METRICS_ITS = [ + "createIntegrationTestScenario", + "validateTestPipelineRunCreation", + "validateTestPipelineRunCondition", +] + +# These metrics will be ignored if Release was skipped +METRICS_RELEASE = [ + "createReleasePlan", + "createReleasePlanAdmission", + "validateReleasePlan", + "validateReleasePlanAdmission", + "validateReleaseCreation", + "validateReleasePipelineRunCreation", + "validateReleasePipelineRunCondition", + "validateReleaseCondition", +] + +# These metrics will be reused when we are reusing applications +METRICS_REUSE_APPLICATIONS = [ + "createApplication", + "validateApplication", + "createIntegrationTestScenario", + "createReleasePlan", + "createReleasePlanAdmission", + "validateReleasePlan", + "validateReleasePlanAdmission", +] + +# These metrics will be reused when we are reusing components +METRICS_REUSE_COMPONENTS = [ + "createComponent", + 
"getPaCPullNumber", + "validateComponent", +] + + +class SinglePass: + """Structure to record data about one specific pass through loadtest workload, identified by an identier (touple with loadtest's per user, per application and per component thread index and repeats counter.""" + + def __init__(self): + self._metrics = {} + + def __contains__(self, item): + return item in self._metrics + + def __getitem__(self, key): + return self._metrics[key] + + def add(self, metric, duration): + """Adds given metric to data about this pass.""" + assert metric not in self._metrics + self._metrics[metric] = duration + + def complete(self, expected_metrics): + """Checks if we have all expected metrics.""" + current = set(self._metrics.keys()) + return current == expected_metrics + + def total(self): + """Return total duration.""" + return sum(self._metrics.values()) + + @staticmethod + def i_matches(identifier1, identifier2): + """Check if first provided identifier matches second one. When we have -1 instead of some value(s) in the first identifier, it acts as a wildcard.""" + if identifier1[3] == -1 or identifier1[3] == identifier2[3]: + if identifier1[2] == -1 or identifier1[2] == identifier2[2]: + if identifier1[1] == -1 or identifier1[1] == identifier2[1]: + if identifier1[0] == -1 or identifier1[0] == identifier2[0]: + return True + return False + + @staticmethod + def i_complete(identifier): + """Check this is complete identifier (does not contain wildcards).""" + return -1 not in identifier + def str2date(date_str): if isinstance(date_str, datetime.datetime): @@ -50,13 +148,10 @@ def count_stats(data): if len(data) == 0: return { "samples": 0, - } - elif len(data) == 1: - return { - "samples": 1, - "min": data[0], - "mean": data[0], - "max": data[0], + "min": -1, + "max": -1, + "mean": -1, + "stdev": -1, } else: return { @@ -64,6 +159,7 @@ def count_stats(data): "min": min(data), "mean": statistics.mean(data), "max": max(data), + "stdev": statistics.stdev(data) if 
len(data) >= 2 else -1, } def count_stats_when(data): @@ -86,10 +182,48 @@ def count_stats_when(data): def main(): - input_file = sys.argv[1] - output_file = sys.argv[2] + options_file = sys.argv[1] + input_file = sys.argv[2] + output_file = sys.argv[3] + + # Load test options + with open(options_file, "r") as fp: + options = json.load(fp) + + # Determine what metrics we need to skip or reuse based on options + to_skip = [] + to_reuse = [] + if options["Stage"]: + print("NOTE: Ignoring CI cluster related metrics because running against non-CI cluster") + to_skip += METRICS_CI + if options["TestScenarioGitURL"] == "": + print("NOTE: Ignoring ITS related metrics because they were disabled at test run") + to_skip += METRICS_ITS + if options["ReleasePolicy"] == "": + print("NOTE: Ignoring Release related metrics because they were disabled at test run") + to_skip += METRICS_RELEASE + if options["JourneyReuseApplications"]: + print("NOTE: Will reuse application metrics as we were reusing applications") + to_reuse += METRICS_REUSE_APPLICATIONS + if options["JourneyReuseComponents"]: + print("NOTE: Will reuse component metrics as we were reusing components") + to_reuse += METRICS_REUSE_COMPONENTS + + # When processing, only consider these metrics + expected_metrics = set(METRICS) - set(to_skip) + reuse_metrics = set(to_reuse) - set(to_skip) stats_raw = {} + stats_passes = {} + + rows_incomplete = [] + + # Prepopulate stats_raw data structure + for m in expected_metrics: + stats_raw[m] = { + "pass": {"duration": [], "when": []}, + "fail": {"duration": [], "when": []}, + } with open(input_file, "r") as fp: csvreader = csv.reader(fp) @@ -98,55 +232,83 @@ def main(): continue when = str2date(row[COLUMN_WHEN]) - metric = row[COLUMN_METRIC] + per_user_t = int(row[COLUMN_PER_USER_T]) + per_app_t = int(row[COLUMN_PER_APP_T]) + per_comp_t = int(row[COLUMN_PER_COMP_T]) + repeats_counter = int(row[COLUMN_REPEATS_COUNTER]) + metric = row[COLUMN_METRIC].split(".")[-1] duration = 
float(row[COLUMN_DURATION]) error = row[COLUMN_ERROR] != "" - for m in METRICS: - if m not in stats_raw: - stats_raw[m] = { - "pass": {"duration": [], "when": []}, - "fail": {"duration": [], "when": []}, - } + if metric not in expected_metrics: + continue + + # First add this record to stats_raw that allows us to track stats per metric + stats_raw[metric]["fail" if error else "pass"]["duration"].append(duration) + stats_raw[metric]["fail" if error else "pass"]["when"].append(when) - if metric.endswith("." + m): - stats_raw[m]["fail" if error else "pass"]["duration"].append(duration) - stats_raw[m]["fail" if error else "pass"]["when"].append(when) + # Second add this record to stats_passes that allows us to track full completed passes + if not error: + identifier = (per_user_t, per_app_t, per_comp_t, repeats_counter) - # print(f"Raw stats: {stats_raw}") + if SinglePass.i_complete(identifier): + if identifier not in stats_passes: + stats_passes[identifier] = SinglePass() + stats_passes[identifier].add(metric, duration) + else: + # Safe this metric for later once we have all passes + rows_incomplete.append((identifier, metric, duration)) + + # Now when we have data about all passes, add metrics that had incomplete identifiers (with wildcards) + for incomplete in rows_incomplete: + identifier, metric, duration = incomplete + found = [v for k, v in stats_passes.items() if SinglePass.i_matches(identifier, k)] + for i in found: + i.add(metric, duration) + #print(f"Metric {metric} added from {identifier}") + + # Now add reused metrics if needed + for pass_id, pass_data in stats_passes.items(): + for reuse_metric in reuse_metrics: + if reuse_metric not in pass_data: + reuse_from_id = pass_id[:3] + (0,) + pass_data.add(reuse_metric, stats_passes[reuse_from_id][reuse_metric]) + #print(f"Metric {reuse_metric} reused from {reuse_from_id} to {pass_id}") + + #print("Raw stats:") + #print(json.dumps(stats_raw, indent=4, default=lambda o: '<' + str(o) + '>')) + 
#print(json.dumps({str(k): v for k, v in stats_passes.items()}, indent=4, default=lambda o: '<' + str(o._metrics) + '>')) stats = {} - kpi_sum = 0.0 + kpi_mean_data = [] + kpi_successes = 0 kpi_errors = 0 - for m in METRICS: - stats[m] = {"pass": {}, "fail": {}} - stats[m]["pass"]["duration"] = count_stats(stats_raw[m]["pass"]["duration"]) - stats[m]["fail"]["duration"] = count_stats(stats_raw[m]["fail"]["duration"]) - stats[m]["pass"]["when"] = count_stats_when(stats_raw[m]["pass"]["when"]) - stats[m]["fail"]["when"] = count_stats_when(stats_raw[m]["fail"]["when"]) - - if stats[m]["pass"]["duration"]["samples"] == 0: - # If we had 0 measurements in some metric, that means not a single - # build made it through all steps, so kpi_sum metric does not make - # sense as it would only cover part of the journey - kpi_sum = -1 - else: - if kpi_sum != -1: - kpi_sum += stats[m]["pass"]["duration"]["mean"] + for m in expected_metrics: + stats[m] = {"pass": {"duration": {"samples": 0}, "when": {}}, "fail": {"duration": {"samples": 0}, "when": {}}} + if m in stats_raw: + stats[m]["pass"]["duration"] = count_stats(stats_raw[m]["pass"]["duration"]) + stats[m]["fail"]["duration"] = count_stats(stats_raw[m]["fail"]["duration"]) + stats[m]["pass"]["when"] = count_stats_when(stats_raw[m]["pass"]["when"]) + stats[m]["fail"]["when"] = count_stats_when(stats_raw[m]["fail"]["when"]) - s = stats[m]["pass"]["duration"]["samples"] + stats[m]["fail"]["duration"]["samples"] - if s == 0: - stats[m]["error_rate"] = None + for k, v in stats_passes.items(): + if v.complete(expected_metrics): + kpi_successes += 1 + kpi_mean_data.append(v.total()) else: - stats[m]["error_rate"] = stats[m]["fail"]["duration"]["samples"] / s - kpi_errors += stats[m]["fail"]["duration"]["samples"] + kpi_errors += 1 stats["KPI"] = {} - stats["KPI"]["mean"] = kpi_sum + stats["KPI"] = count_stats(kpi_mean_data) + stats["KPI"]["successes"] = kpi_successes stats["KPI"]["errors"] = kpi_errors + #print("Final stats:") + 
#print(json.dumps(stats, indent=4)) + print(f"KPI mean: {stats['KPI']['mean']}") + print(f"KPI successes: {stats['KPI']['successes']}") print(f"KPI errors: {stats['KPI']['errors']}") with open(output_file, "w") as fp: diff --git a/tests/load-tests/loadtest.go b/tests/load-tests/loadtest.go index 4bd40fa5f5..218f87df98 100644 --- a/tests/load-tests/loadtest.go +++ b/tests/load-tests/loadtest.go @@ -6,9 +6,12 @@ import "time" import journey "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/journey" import options "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/options" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" import cobra "github.com/spf13/cobra" import klog "k8s.io/klog/v2" +import textlogger "k8s.io/klog/v2/textlogger" +import ctrl "sigs.k8s.io/controller-runtime" //import "os" //import "context" @@ -36,23 +39,37 @@ func init() { rootCmd.Flags().StringVar(&opts.ComponentRepoRevision, "component-repo-revision", "main", "the component repo revision, git branch") rootCmd.Flags().StringVar(&opts.ComponentContainerFile, "component-repo-container-file", "Dockerfile", "the component repo container file to build") rootCmd.Flags().StringVar(&opts.ComponentContainerContext, "component-repo-container-context", "/", "the context for image build") + rootCmd.Flags().StringVar(&opts.ForkTarget, "fork-target", "", "the target namespace (GitLab) or organization (GitHub) to fork component repository to (if empty, will use MY_GITHUB_ORG env variable)") rootCmd.Flags().StringVar(&opts.QuayRepo, "quay-repo", "redhat-user-workloads-stage", "the target quay repo for PaC templated image pushes") - rootCmd.Flags().StringVar(&opts.UsernamePrefix, "username", "testuser", "the prefix used for usersignup names") + rootCmd.Flags().StringVar(&opts.RunPrefix, "runprefix", "testuser", "identifier used for prefix of usersignup names and as suffix when forking repo") + 
rootCmd.Flags().BoolVar(&opts.SerializeComponentOnboarding, "serialize-component-onboarding", false, "should we serialize creation and onboarding of a component (wait will not affect measurement)") rootCmd.Flags().BoolVarP(&opts.Stage, "stage", "s", false, "is you want to run the test on stage") + rootCmd.Flags().DurationVar(&opts.StartupDelay, "startup-delay", 0, "when starting per user/per application/per client treads, wait for this duration") + rootCmd.Flags().DurationVar(&opts.StartupJitter, "startup-jitter", 3*time.Second, "when applying startup delay, add or remove half of jitter with this maximum value") rootCmd.Flags().BoolVarP(&opts.Purge, "purge", "p", false, "purge all users or resources (on stage) after test is done") rootCmd.Flags().BoolVarP(&opts.PurgeOnly, "purge-only", "u", false, "do not run test, only purge resources (this implies --purge)") - rootCmd.Flags().StringVar(&opts.TestScenarioGitURL, "test-scenario-git-url", "https://github.com/konflux-ci/integration-examples.git", "test scenario GIT URL") + rootCmd.Flags().StringVar(&opts.TestScenarioGitURL, "test-scenario-git-url", "https://github.com/konflux-ci/integration-examples.git", "test scenario GIT URL (set to \"\" to disable creating these)") rootCmd.Flags().StringVar(&opts.TestScenarioRevision, "test-scenario-revision", "main", "test scenario GIT URL repo revision to use") rootCmd.Flags().StringVar(&opts.TestScenarioPathInRepo, "test-scenario-path-in-repo", "pipelines/integration_resolver_pipeline_pass.yaml", "test scenario path in GIT repo") + rootCmd.Flags().StringVar(&opts.ReleasePolicy, "release-policy", "", "enterprise contract policy name to use, e.g. 
\"tmp-onboard-policy\" (keep empty to skip release testing)") + rootCmd.Flags().StringVar(&opts.ReleasePipelineUrl, "release-pipeline-url", "https://github.com/konflux-ci/release-service-catalog.git", "release pipeline URL suitable for git resolver") + rootCmd.Flags().StringVar(&opts.ReleasePipelineRevision, "release-pipeline-revision", "production", "release pipeline repo branch suitable for git resolver") + rootCmd.Flags().StringVar(&opts.ReleasePipelinePath, "release-pipeline-path", "pipelines/managed/e2e/e2e.yaml", "release pipeline file path suitable for git resolver") + rootCmd.Flags().StringVar(&opts.ReleasePipelineServiceAccount, "release-pipeline-service-account", "release-serviceaccount", "service account to use for release pipeline") rootCmd.Flags().BoolVarP(&opts.WaitPipelines, "waitpipelines", "w", false, "if you want to wait for pipelines to finish") rootCmd.Flags().BoolVarP(&opts.WaitIntegrationTestsPipelines, "waitintegrationtestspipelines", "i", false, "if you want to wait for IntegrationTests (Integration Test Scenario) pipelines to finish") + rootCmd.Flags().BoolVarP(&opts.WaitRelease, "waitrelease", "r", false, "if you want to wait for Release to finish") rootCmd.Flags().BoolVar(&opts.FailFast, "fail-fast", false, "if you want the test to fail fast at first failure") rootCmd.Flags().IntVarP(&opts.Concurrency, "concurrency", "c", 1, "number of concurrent threads to execute") rootCmd.Flags().IntVar(&opts.JourneyRepeats, "journey-repeats", 1, "number of times to repeat user journey (either this or --journey-duration)") rootCmd.Flags().StringVar(&opts.JourneyDuration, "journey-duration", "1h", "repeat user journey until this timeout (either this or --journey-repeats)") + rootCmd.Flags().BoolVar(&opts.JourneyReuseApplications, "journey-reuse-applications", false, "when repeating journey, do not create new application (and integration test scenario and release plan and repease plan admission) on every journey repeat") + 
rootCmd.Flags().BoolVar(&opts.JourneyReuseComponents, "journey-reuse-componets", false, "when repeating journey, do not create new component on every journey repeat; this implies --journey-reuse-applications") rootCmd.Flags().BoolVar(&opts.PipelineMintmakerDisabled, "pipeline-mintmaker-disabled", true, "if you want to stop Mintmaker to be creating update PRs for your component (default in loadtest different from Konflux default)") rootCmd.Flags().BoolVar(&opts.PipelineRepoTemplating, "pipeline-repo-templating", false, "if we should use in repo template pipelines (merge PaC PR, template repo pipelines and ignore custom pipeline run, e.g. required for multi arch test)") - rootCmd.Flags().StringArrayVar(&opts.PipelineImagePullSecrets, "pipeline-image-pull-secrets", []string{}, "space separated secrets needed to pull task images") + rootCmd.Flags().StringVar(&opts.PipelineRepoTemplatingSource, "pipeline-repo-templating-source", "", "when templating, take template source files from this repository (\"\" means we will get source files from current repo)") + rootCmd.Flags().StringVar(&opts.PipelineRepoTemplatingSourceDir, "pipeline-repo-templating-source-dir", "", "when templating from additional repository, take template source files from this directory (\"\" means default \".template/\" will ne used)") + rootCmd.Flags().StringArrayVar(&opts.PipelineImagePullSecrets, "pipeline-image-pull-secrets", []string{}, "secret needed to pull task images, can be used multiple times") rootCmd.Flags().StringVarP(&opts.OutputDir, "output-dir", "o", ".", "directory where output files such as load-tests.log or load-tests.json are stored") rootCmd.Flags().StringVar(&opts.BuildPipelineSelectorBundle, "build-pipeline-selector-bundle", "", "BuildPipelineSelector bundle to use when testing with build-definition PR") rootCmd.Flags().BoolVarP(&opts.LogInfo, "log-info", "v", false, "log messages with info level and above") @@ -63,6 +80,15 @@ func init() { func main() { var err error + // Setup 
logging + klog.InitFlags(nil) + defer klog.Flush() + // Set the controller-runtime logger to use klog's textlogger. + // This makes controller-runtime logs go through klog. + // Hopefully will help us to avoid these errors: + // [controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. + ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig())) + // Setup argument parser err = rootCmd.Execute() if err != nil { @@ -90,19 +116,27 @@ func main() { } // Show test options - logging.Logger.Debug("Options: %+v", opts) + logging.Logger.Debug("Options: %+v", &opts) // Tier up measurements logger logging.MeasurementsStart(opts.OutputDir) - // Start given number of `perUserThread()` threads using `journey.Setup()` and wait for them to finish - _, err = logging.Measure(journey.Setup, perUserThread, &opts) + // Start given number of `perUserThread()` threads using `journey.PerUserSetup()` and wait for them to finish + _, err = logging.Measure( + nil, + journey.PerUserSetup, + perUserThread, + &opts, + ) if err != nil { logging.Logger.Fatal("Threads setup failed: %v", err) } // Cleanup resources - _, err = logging.Measure(journey.Purge) + _, err = logging.Measure( + nil, + journey.Purge, + ) if err != nil { logging.Logger.Error("Purging failed: %v", err) } @@ -112,8 +146,10 @@ func main() { } // Single user journey -func perUserThread(threadCtx *journey.MainContext) { - defer threadCtx.ThreadsWG.Done() +func perUserThread(perUserCtx *types.PerUserContext) { + defer perUserCtx.PerUserWG.Done() + + time.Sleep(perUserCtx.StartupPause) var err error @@ -131,9 +167,9 @@ func perUserThread(threadCtx *journey.MainContext) { //} //// Create watcher //fmt.Print("Creating watcher...\n") - //watcher, err2 := threadCtx.Framework.AsKubeDeveloper.CommonController.DynamicClient(). + //watcher, err2 := perUserCtx.Framework.AsKubeDeveloper.CommonController.DynamicClient(). // Resource(gvr). - // Namespace(threadCtx.Namespace).
// Watch(watchCtx, listOptions) //if err2 != nil { // fmt.Printf("Can not get watcher: %v", err2) @@ -194,16 +230,21 @@ func perUserThread(threadCtx *journey.MainContext) { //watcher.Stop() //os.Exit(10) - for threadCtx.JourneyRepeatsCounter = 1; threadCtx.JourneyRepeatsCounter <= threadCtx.Opts.JourneyRepeats; threadCtx.JourneyRepeatsCounter++ { + for perUserCtx.JourneyRepeatsCounter = 0; perUserCtx.JourneyRepeatsCounter < perUserCtx.Opts.JourneyRepeats; perUserCtx.JourneyRepeatsCounter++ { // Start given number of `perApplicationThread()` threads using `journey.PerApplicationSetup()` and wait for them to finish - _, err = logging.Measure(journey.PerApplicationSetup, perApplicationThread, threadCtx) + _, err = logging.Measure( + perUserCtx, + journey.PerApplicationSetup, + perApplicationThread, + perUserCtx, + ) if err != nil { logging.Logger.Fatal("Per application threads setup failed: %v", err) } // Check if we are supposed to quit based on --journey-duration - if time.Now().UTC().After(threadCtx.Opts.JourneyUntil) { + if time.Now().UTC().After(perUserCtx.Opts.JourneyUntil) { logging.Logger.Debug("Done with user journey because of timeout") break } @@ -211,7 +252,11 @@ func perUserThread(threadCtx *journey.MainContext) { } // Collect info about PVCs - _, err = logging.Measure(journey.HandlePersistentVolumeClaim, threadCtx) + _, err = logging.Measure( + perUserCtx, + journey.HandlePersistentVolumeClaim, + perUserCtx, + ) if err != nil { logging.Logger.Error("Thread failed: %v", err) return @@ -220,34 +265,74 @@ func perUserThread(threadCtx *journey.MainContext) { } // Single application journey (there can be multiple parallel apps per user) -func perApplicationThread(perApplicationCtx *journey.PerApplicationContext) { +func perApplicationThread(perApplicationCtx *types.PerApplicationContext) { defer perApplicationCtx.PerApplicationWG.Done() + defer func() { + _, err := logging.Measure( + perApplicationCtx, + journey.HandlePerApplicationCollection, + 
perApplicationCtx, + ) + if err != nil { + logging.Logger.Error("Per application thread failed: %v", err) + } + }() + + time.Sleep(perApplicationCtx.StartupPause) var err error // Create framework so we do not have to share framework with parent thread - _, err = logging.Measure(journey.HandleNewFrameworkForApp, perApplicationCtx) + _, err = logging.Measure( + perApplicationCtx, + journey.HandleNewFrameworkForApp, + perApplicationCtx, + ) if err != nil { logging.Logger.Error("Per application thread failed: %v", err) return } // Create application - _, err = logging.Measure(journey.HandleApplication, perApplicationCtx) + _, err = logging.Measure( + perApplicationCtx, + journey.HandleApplication, + perApplicationCtx, + ) if err != nil { - logging.Logger.Error("Thread failed: %v", err) + logging.Logger.Error("Per application thread failed: %v", err) return } // Create integration test scenario - _, err = logging.Measure(journey.HandleIntegrationTestScenario, perApplicationCtx) + _, err = logging.Measure( + perApplicationCtx, + journey.HandleIntegrationTestScenario, + perApplicationCtx, + ) if err != nil { - logging.Logger.Error("Thread failed: %v", err) + logging.Logger.Error("Per application thread failed: %v", err) + return + } + + // Create release plan and release plan admission + _, err = logging.Measure( + perApplicationCtx, + journey.HandleReleaseSetup, + perApplicationCtx, + ) + if err != nil { + logging.Logger.Error("Per application thread failed: %v", err) return } // Start given number of `perComponentThread()` threads using `journey.PerComponentSetup()` and wait for them to finish - _, err = logging.Measure(journey.PerComponentSetup, perComponentThread, perApplicationCtx) + _, err = logging.Measure( + perApplicationCtx, + journey.PerComponentSetup, + perComponentThread, + perApplicationCtx, + ) if err != nil { logging.Logger.Fatal("Per component threads setup failed: %v", err) } @@ -255,40 +340,73 @@ func perApplicationThread(perApplicationCtx 
*journey.PerApplicationContext) { } // Single component journey (there can be multiple parallel comps per app) -func perComponentThread(perComponentCtx *journey.PerComponentContext) { +func perComponentThread(perComponentCtx *types.PerComponentContext) { defer perComponentCtx.PerComponentWG.Done() defer func() { - _, err := logging.Measure(journey.HandlePerComponentCollection, perComponentCtx) + _, err := logging.Measure( + perComponentCtx, + journey.HandlePerComponentCollection, + perComponentCtx, + ) if err != nil { logging.Logger.Error("Per component thread failed: %v", err) } }() + time.Sleep(perComponentCtx.StartupPause) + var err error // Create framework so we do not have to share framework with parent thread - _, err = logging.Measure(journey.HandleNewFrameworkForComp, perComponentCtx) + _, err = logging.Measure( + perComponentCtx, + journey.HandleNewFrameworkForComp, + perComponentCtx, + ) if err != nil { logging.Logger.Error("Per component thread failed: %v", err) return } // Create component - _, err = logging.Measure(journey.HandleComponent, perComponentCtx) + _, err = logging.Measure( + perComponentCtx, + journey.HandleComponent, + perComponentCtx, + ) if err != nil { logging.Logger.Error("Per component thread failed: %v", err) return } // Wait for build pipiline run - _, err = logging.Measure(journey.HandlePipelineRun, perComponentCtx) + _, err = logging.Measure( + perComponentCtx, + journey.HandlePipelineRun, + perComponentCtx, + ) if err != nil { logging.Logger.Error("Per component thread failed: %v", err) return } // Wait for test pipiline run - _, err = logging.Measure(journey.HandleTest, perComponentCtx) + _, err = logging.Measure( + perComponentCtx, + journey.HandleTest, + perComponentCtx, + ) + if err != nil { + logging.Logger.Error("Per component thread failed: %v", err) + return + } + + // Wait for release to finish + _, err = logging.Measure( + perComponentCtx, + journey.HandleReleaseRun, + perComponentCtx, + ) if err != nil { 
logging.Logger.Error("Per component thread failed: %v", err) return diff --git a/tests/load-tests/pkg/journey/handle_applications.go b/tests/load-tests/pkg/journey/handle_applications.go index 672b55f376..ca685a0bcc 100644 --- a/tests/load-tests/pkg/journey/handle_applications.go +++ b/tests/load-tests/pkg/journey/handle_applications.go @@ -4,16 +4,20 @@ import "fmt" import "time" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" import framework "github.com/konflux-ci/e2e-tests/pkg/framework" import utils "github.com/konflux-ci/e2e-tests/pkg/utils" -func createApplication(f *framework.Framework, namespace string, timeout time.Duration, name string) error { - _, err := f.AsKubeDeveloper.HasController.CreateApplicationWithTimeout(name, namespace, timeout) +import util "github.com/devfile/library/v2/pkg/util" + +func createApplication(f *framework.Framework, namespace string, runPrefix string) (string, error) { + name := fmt.Sprintf("%s-app-%s", runPrefix, util.GenerateRandomString(5)) + _, err := f.AsKubeDeveloper.HasController.CreateApplicationWithTimeout(name, namespace, time.Minute*60) if err != nil { - return fmt.Errorf("Unable to create the Application %s: %v", name, err) + return "", fmt.Errorf("Unable to create the Application %s: %v", name, err) } - return nil + return name, nil } func validateApplication(f *framework.Framework, name, namespace string) error { @@ -34,30 +38,71 @@ func validateApplication(f *framework.Framework, name, namespace string) error { return err } -func HandleApplication(ctx *PerApplicationContext) error { +func HandleApplication(ctx *types.PerApplicationContext) error { + if ctx.ParentContext.Opts.JourneyReuseApplications && ctx.JourneyRepeatIndex > 0 { + // This is a reused application. We need to get the name from the first application. + // We must wait until the first application's context has the name. 
+ firstApplicationCtx := ctx.ParentContext.PerApplicationContexts[ctx.ApplicationIndex] + + interval := time.Second * 2 + timeout := time.Minute * 20 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + if firstApplicationCtx.ApplicationName != "" && firstApplicationCtx.IntegrationTestScenarioName != "" && (ctx.ParentContext.Opts.ReleasePolicy == "" || (firstApplicationCtx.ReleasePlanName != "" && firstApplicationCtx.ReleasePlanAdmissionName != "")) { + logging.Logger.Debug("Reused application name is now available: %s", firstApplicationCtx.ApplicationName) + return true, nil + } + logging.Logger.Trace("Waiting for application name from first application thread") + return false, nil + }, interval, timeout) + + if err != nil { + return logging.Logger.Fail(30, "timed out waiting for application name from first application thread: %v", err) + } + + ctx.ApplicationName = firstApplicationCtx.ApplicationName + ctx.IntegrationTestScenarioName = firstApplicationCtx.IntegrationTestScenarioName + ctx.ReleasePlanName = firstApplicationCtx.ReleasePlanName + ctx.ReleasePlanAdmissionName = firstApplicationCtx.ReleasePlanAdmissionName + logging.Logger.Debug("Reusing application %s and others in thread %d-%d", ctx.ApplicationName, ctx.ParentContext.UserIndex, ctx.ApplicationIndex) + } + + if ctx.ApplicationName != "" { + logging.Logger.Debug("Skipping application creation because reusing application %s in namespace %s", ctx.ApplicationName, ctx.ParentContext.Namespace) + return nil + } + + var iface interface{} var err error + var ok bool logging.Logger.Debug("Creating application %s in namespace %s", ctx.ApplicationName, ctx.ParentContext.Namespace) - _, err = logging.Measure( + iface, err = logging.Measure( + ctx, createApplication, ctx.Framework, ctx.ParentContext.Namespace, - time.Minute*60, - ctx.ApplicationName, + ctx.ParentContext.Opts.RunPrefix, ) if err != nil { return logging.Logger.Fail(30, "Application failed creation: %v", err) } + 
ctx.ApplicationName, ok = iface.(string) + if !ok { + return logging.Logger.Fail(31, "Type assertion failed on application name: %+v", iface) + } + _, err = logging.Measure( + ctx, validateApplication, ctx.Framework, ctx.ApplicationName, ctx.ParentContext.Namespace, ) if err != nil { - return logging.Logger.Fail(31, "Application failed validation: %v", err) + return logging.Logger.Fail(32, "Application failed validation: %v", err) } return nil diff --git a/tests/load-tests/pkg/journey/handle_collections.go b/tests/load-tests/pkg/journey/handle_collections.go index a1048a98aa..874b56c470 100644 --- a/tests/load-tests/pkg/journey/handle_collections.go +++ b/tests/load-tests/pkg/journey/handle_collections.go @@ -8,10 +8,14 @@ import ( "path/filepath" logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" + types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" framework "github.com/konflux-ci/e2e-tests/pkg/framework" + + k8s_api_errors "k8s.io/apimachinery/pkg/api/errors" ) + func getDirName(baseDir, namespace, iteration string) string { return filepath.Join(baseDir, "collected-data", namespace, iteration) + "/" } @@ -44,15 +48,15 @@ func writeToFile(dirPath, file string, contents []byte) error { return nil } -func collectPodLogs(f *framework.Framework, dirPath, namespace, component string) error { +func collectPodLogs(f *framework.Framework, dirPath, namespace, application string) error { podList, err := f.AsKubeAdmin.CommonController.ListPods( namespace, - "appstudio.openshift.io/component", - component, + "appstudio.openshift.io/application", + application, 100, ) if err != nil { - return fmt.Errorf("Failed to list pods in namespace %s for component %s: %v", namespace, component, err) + return fmt.Errorf("Failed to list pods in namespace %s for application %s: %v", namespace, application, err) } for _, pod := range podList.Items { @@ -87,12 +91,24 @@ func collectPodLogs(f *framework.Framework, dirPath, namespace, component string return 
nil } -func collectPipelineRunJSONs(f *framework.Framework, dirPath, namespace, application, component string) error { +func collectPipelineRunJSONs(f *framework.Framework, dirPath, namespace, application, component, release string) error { prs, err := f.AsKubeDeveloper.HasController.GetComponentPipelineRunsWithType(component, application, namespace, "", "", "") if err != nil { return fmt.Errorf("Failed to list PipelineRuns %s/%s/%s: %v", namespace, application, component, err) } + if release != "" { + pr_release, err := f.AsKubeDeveloper.ReleaseController.GetPipelineRunInNamespace(namespace, release, namespace) + if err != nil { + logging.Logger.Warning("Failed to get Release PipelineRun %s/%s: %v", namespace, release, err) + } + + // Add release pipeline runs to the list + if pr_release != nil { + *prs = append(*prs, *pr_release) + } + } + for _, pr := range *prs { prJSON, err := json.Marshal(pr) if err != nil { @@ -134,7 +150,7 @@ func collectPipelineRunJSONs(f *framework.Framework, dirPath, namespace, applica return nil } -func collectApplicationComponentJSONs(f *framework.Framework, dirPath, namespace, application, component string) error { +func collectApplicationJSONs(f *framework.Framework, dirPath, namespace, application string) error { appJsonFileName := "collected-application-" + application + ".json" // Only save Application JSON if it has not already been collected (as HandlePerComponentCollection method is called for each component) if _, err := os.Stat(filepath.Join(dirPath, appJsonFileName)); errors.Is(err, os.ErrNotExist) { @@ -155,6 +171,10 @@ func collectApplicationComponentJSONs(f *framework.Framework, dirPath, namespace } } + return nil +} + +func collectComponentJSONs(f *framework.Framework, dirPath, namespace, component string) error { // Collect Component JSON comp, err := f.AsKubeDeveloper.HasController.GetComponent(component, namespace) if err != nil { @@ -174,7 +194,127 @@ func collectApplicationComponentJSONs(f *framework.Framework, 
dirPath, namespace return nil } -func HandlePerComponentCollection(ctx *PerComponentContext) error { +func collectReleaseRelatedJSONs(f *framework.Framework, dirPath, namespace, appName, compName, snapName, releasePlanName, releasePlanAdmissionName, relName string) error { + // Collect ReleasePlan JSON + if releasePlanName != "" { + releasePlan, err := f.AsKubeDeveloper.ReleaseController.GetReleasePlan(releasePlanName, namespace) + if err != nil { + if !k8s_api_errors.IsNotFound(err) { + return fmt.Errorf("Failed to get Release Plan %s: %v", releasePlanName, err) + } + } + + if err == nil { + releasePlanJSON, err := json.Marshal(releasePlan) + if err != nil { + return fmt.Errorf("Failed to dump Release Plan JSON: %v", err) + } + + err = writeToFile(dirPath, "collected-releaseplan-" + releasePlanName + ".json", releasePlanJSON) + if err != nil { + return fmt.Errorf("Failed to write Release Plan: %v", err) + } + } + } + + // Collect ReleasePlanAdmission JSON + if releasePlanAdmissionName != "" { + releasePlanAdmission, err := f.AsKubeDeveloper.ReleaseController.GetReleasePlanAdmission(releasePlanAdmissionName, namespace) + if err != nil { + if !k8s_api_errors.IsNotFound(err) { + return fmt.Errorf("Failed to get Release Plan Admission %s: %v", releasePlanAdmissionName, err) + } + } + + if err == nil { + releasePlanAdmissionJSON, err := json.Marshal(releasePlanAdmission) + if err != nil { + return fmt.Errorf("Failed to dump Release Plan Admission JSON: %v", err) + } + + err = writeToFile(dirPath, "collected-releaseplanadmission-" + releasePlanAdmissionName + ".json", releasePlanAdmissionJSON) + if err != nil { + return fmt.Errorf("Failed to write Release Plan Admission: %v", err) + } + } + } + + // Collect Snapshot JSON + if len(snapName) > 0 { + snap, err := f.AsKubeDeveloper.IntegrationController.GetSnapshot(snapName, "", compName, namespace) + if err != nil { + if !k8s_api_errors.IsNotFound(err) { + return fmt.Errorf("Failed to get Snapshot %s: %v", snapName, err) + 
} + } + + if err == nil { + snapJSON, err := json.Marshal(snap) + if err != nil { + return fmt.Errorf("Failed to dump Snapshot JSON: %v", err) + } + + err = writeToFile(dirPath, "collected-snapshot-" + snapName + ".json", snapJSON) + if err != nil { + return fmt.Errorf("Failed to write Snapshot: %v", err) + } + } + } + + // Collect Release JSON + if len(relName) > 0 { + rel, err := f.AsKubeDeveloper.ReleaseController.GetRelease(relName, "", namespace) + if err != nil { + if !k8s_api_errors.IsNotFound(err) { + return fmt.Errorf("Failed to get Release %s: %v", relName, err) + } + } + + if err == nil { + relJSON, err := json.Marshal(rel) + if err != nil { + return fmt.Errorf("Failed to dump Release JSON: %v", err) + } + + err = writeToFile(dirPath, "collected-release-" + relName + ".json", relJSON) + if err != nil { + return fmt.Errorf("Failed to write Release: %v", err) + } + } + } + + return nil +} + +func HandlePerApplicationCollection(ctx *types.PerApplicationContext) error { + if ctx.ApplicationName == "" { + logging.Logger.Debug("Application name not populated, so skipping per-application collections in %s", ctx.ParentContext.Namespace) + return nil + } + + var err error + + journeyCounterStr := fmt.Sprintf("%d", ctx.JourneyRepeatIndex) + dirPath := getDirName(ctx.ParentContext.Opts.OutputDir, ctx.ParentContext.Namespace, journeyCounterStr) + err = createDir(dirPath) + if err != nil { + return logging.Logger.Fail(105, "Failed to create dir: %v", err) + } + + err = collectPodLogs(ctx.Framework, dirPath, ctx.ParentContext.Namespace, ctx.ApplicationName) + if err != nil { + return logging.Logger.Fail(106, "Failed to collect pod logs: %v", err) + } + + err = collectApplicationJSONs(ctx.Framework, dirPath, ctx.ParentContext.Namespace, ctx.ApplicationName) + if err != nil { + return logging.Logger.Fail(107, "Failed to collect application JSONs: %v", err) + } + + return nil +} + +func HandlePerComponentCollection(ctx *types.PerComponentContext) error { if 
ctx.ComponentName == "" { logging.Logger.Debug("Component name not populated, so skipping per-component collections in %s", ctx.ParentContext.ParentContext.Namespace) return nil @@ -182,7 +322,7 @@ func HandlePerComponentCollection(ctx *PerComponentContext) error { var err error - journeyCounterStr := fmt.Sprintf("%d", ctx.ParentContext.ParentContext.JourneyRepeatsCounter) + journeyCounterStr := fmt.Sprintf("%d", ctx.ParentContext.JourneyRepeatIndex) dirPath := getDirName(ctx.ParentContext.ParentContext.Opts.OutputDir, ctx.ParentContext.ParentContext.Namespace, journeyCounterStr) err = createDir(dirPath) if err != nil { @@ -194,14 +334,19 @@ func HandlePerComponentCollection(ctx *PerComponentContext) error { return logging.Logger.Fail(101, "Failed to collect pod logs: %v", err) } - err = collectPipelineRunJSONs(ctx.Framework, dirPath, ctx.ParentContext.ParentContext.Namespace, ctx.ParentContext.ApplicationName, ctx.ComponentName) + err = collectPipelineRunJSONs(ctx.Framework, dirPath, ctx.ParentContext.ParentContext.Namespace, ctx.ParentContext.ApplicationName, ctx.ComponentName, ctx.ReleaseName) if err != nil { return logging.Logger.Fail(102, "Failed to collect pipeline run JSONs: %v", err) } - err = collectApplicationComponentJSONs(ctx.Framework, dirPath, ctx.ParentContext.ParentContext.Namespace, ctx.ParentContext.ApplicationName, ctx.ComponentName) + err = collectComponentJSONs(ctx.Framework, dirPath, ctx.ParentContext.ParentContext.Namespace, ctx.ComponentName) + if err != nil { + return logging.Logger.Fail(103, "Failed to collect component JSONs: %v", err) + } + + err = collectReleaseRelatedJSONs(ctx.Framework, dirPath, ctx.ParentContext.ParentContext.Namespace, ctx.ParentContext.ApplicationName, ctx.ComponentName, ctx.SnapshotName, ctx.ParentContext.ReleasePlanName, ctx.ParentContext.ReleasePlanAdmissionName, ctx.ReleaseName) if err != nil { - return logging.Logger.Fail(102, "Failed to collect Application and Component JSONs: %v", err) + return 
logging.Logger.Fail(104, "Failed to collect release related JSONs: %v", err) } return nil diff --git a/tests/load-tests/pkg/journey/handle_component.go b/tests/load-tests/pkg/journey/handle_component.go index 8e304a30e2..81e3e46cfa 100644 --- a/tests/load-tests/pkg/journey/handle_component.go +++ b/tests/load-tests/pkg/journey/handle_component.go @@ -9,6 +9,7 @@ import ( "time" logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" + types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" constants "github.com/konflux-ci/e2e-tests/pkg/constants" @@ -73,6 +74,7 @@ func getPaCPull(annotations map[string]string) (string, error) { // Get "merge-url" if data, ok = pac["merge-url"].(string); ok { + logging.Logger.Debug("Found PaC merge request URL: %s", data) return data, nil } else { return "", fmt.Errorf("Failed parsing state: %s", buildStatusValue) @@ -82,7 +84,11 @@ func getPaCPull(annotations map[string]string) (string, error) { } } -func createComponent(f *framework.Framework, namespace, name, repoUrl, repoRevision, containerContext, containerFile, buildPipelineSelector, appName string, mintmakerDisabled bool) error { +func createComponent(f *framework.Framework, namespace, repoUrl, repoRevision, containerContext, containerFile, buildPipelineSelector, appName string, componentIndex int, mintmakerDisabled bool) (string, error) { + name := fmt.Sprintf("%s-comp-%d", appName, componentIndex) + + logging.Logger.Debug("Creating component %s in namespace %s", name, namespace) + // Prepare annotations to add to component annotationsMap := constants.DefaultDockerBuildPipelineBundleAnnotation if buildPipelineSelector != "" { @@ -117,9 +123,42 @@ func createComponent(f *framework.Framework, namespace, name, repoUrl, repoRevis _, err := f.AsKubeDeveloper.HasController.CreateComponent(componentObj, namespace, "", "", appName, false, annotationsMap) if err != nil { - return fmt.Errorf("Unable to create the Component %s: %v", name, err) + return "", 
fmt.Errorf("Unable to create the Component %s: %v", name, err) } - return nil + return name, nil +} + +func validateComponent(f *framework.Framework, namespace, name string) error { + interval := time.Second * 10 + timeout := time.Minute * 30 + + // TODO It would be much better to watch this resource instead querying it + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + comp, err := f.AsKubeDeveloper.HasController.GetComponent(name, namespace) + if err != nil { + logging.Logger.Debug("Unable to get component %s in namespace %s for its annotations: %v", name, namespace, err) + return false, nil + } + + // If build.appstudio.openshift.io/request annotation is gone, component finished onboarding + _, ok := comp.Annotations["build.appstudio.openshift.io/request"] + if ! ok { + logging.Logger.Debug("Finished onboarding of component %s in namespace %s", name, namespace) + return true, nil + } + + // If it is still there, build.appstudio.openshift.io/status will have a reason + val, ok := comp.Annotations["build.appstudio.openshift.io/status"] + if ok { + logging.Logger.Debug("Onboarding of a component %s in namespace %s not finished yet: %s", name, namespace, val) + } else { + logging.Logger.Debug("Onboarding of a component %s in namespace %s not started yet", name, namespace) + } + + return false, nil + }, interval, timeout) + + return err } func getPaCPullNumber(f *framework.Framework, namespace, name string) (int, error) { @@ -140,7 +179,8 @@ func getPaCPullNumber(f *framework.Framework, namespace, name string) (int, erro // Check for right annotation pull, err = getPaCPull(comp.Annotations) if err != nil { - return false, fmt.Errorf("PaC component %s in namespace %s failed on PR annotation: %v", name, namespace, err) + logging.Logger.Debug("PaC component %s in namespace %s failed on PR annotation: %v", name, namespace, err) + return false, nil } if pull == "" { logging.Logger.Debug("PaC component %s in namespace %s do not have PR yet", name, 
namespace) @@ -168,7 +208,6 @@ func configurePipelineImagePullSecrets(f *framework.Framework, namespace, compon component_sa := "build-pipeline-" + component for _, secret := range secrets { - println("-", secret) err := f.AsKubeAdmin.CommonController.LinkSecretToServiceAccount(namespace, secret, component_sa, true) if err != nil { return fmt.Errorf("Unable to add secret %s to service account %s: %v", secret, component_sa, err) @@ -183,7 +222,7 @@ func listPipelineRunsWithTimeout(f *framework.Framework, namespace, appName, com var err error interval := time.Second * 20 - timeout := time.Minute * 60 + timeout := time.Minute * 30 err = utils.WaitUntilWithInterval(func() (done bool, err error) { prs, err = f.AsKubeDeveloper.HasController.GetComponentPipelineRunsWithType(compName, appName, namespace, "build", sha, "") @@ -225,8 +264,7 @@ func listAndDeletePipelineRunsWithTimeout(f *framework.Framework, namespace, app } // This handles post-component creation tasks for multi-arch PaC workflow -func utilityRepoTemplatingComponentCleanup(f *framework.Framework, namespace, appName, compName, repoUrl, repoRev string, mergeReqNum int, placeholders *map[string]string) error { - var repoName string +func utilityRepoTemplatingComponentCleanup(f *framework.Framework, namespace, appName, compName, repoUrl, repoRev, sourceRepo, sourceRepoDir string, mergeReqNum int, placeholders *map[string]string) error { var err error // Delete on-pull-request default pipeline run @@ -237,19 +275,26 @@ func utilityRepoTemplatingComponentCleanup(f *framework.Framework, namespace, ap logging.Logger.Debug("Repo-templating workflow: Cleaned up (first cleanup) for %s/%s/%s", namespace, appName, compName) // Merge default PaC pipelines PR - repoName, err = getRepoNameFromRepoUrl(repoUrl) - if err != nil { - return fmt.Errorf("Failed parsing repo name: %v", err) - } if strings.Contains(repoUrl, "gitlab.") { - _, err = f.AsKubeAdmin.CommonController.Gitlab.AcceptMergeRequest(repoName, mergeReqNum) + 
repoId, err := getRepoIdFromRepoUrl(repoUrl) + if err != nil { + return fmt.Errorf("Failed parsing repo org/name: %v", err) + } + _, err = f.AsKubeAdmin.CommonController.Gitlab.AcceptMergeRequest(repoId, mergeReqNum) + if err != nil { + return fmt.Errorf("Merging %d failed: %v", mergeReqNum, err) + } } else { + repoName, err := getRepoNameFromRepoUrl(repoUrl) + if err != nil { + return fmt.Errorf("Failed parsing repo name: %v", err) + } _, err = f.AsKubeAdmin.CommonController.Github.MergePullRequest(repoName, mergeReqNum) + if err != nil { + return fmt.Errorf("Merging %d failed: %v", mergeReqNum, err) + } } - if err != nil { - return fmt.Errorf("Merging %d failed: %v", mergeReqNum, err) - } - logging.Logger.Debug("Repo-templating workflow: Merged PR %d in %s", mergeReqNum, repoName) + logging.Logger.Debug("Repo-templating workflow: Merged PR %d in %s", mergeReqNum, repoUrl) // Delete all pipeline runs as we do not care about these err = listAndDeletePipelineRunsWithTimeout(f, namespace, appName, compName, "", 1) @@ -259,7 +304,7 @@ func utilityRepoTemplatingComponentCleanup(f *framework.Framework, namespace, ap logging.Logger.Debug("Repo-templating workflow: Cleaned up (second cleanup) for %s/%s/%s", namespace, appName, compName) // Template our multi-arch PaC files - shaMap, err := templateFiles(f, repoUrl, repoRev, placeholders) + shaMap, err := templateFiles(f, repoUrl, repoRev, sourceRepo, sourceRepoDir, placeholders) if err != nil { return fmt.Errorf("Error templating PaC files: %v", err) } @@ -279,45 +324,123 @@ func utilityRepoTemplatingComponentCleanup(f *framework.Framework, namespace, ap return nil } -func HandleComponent(ctx *PerComponentContext) error { - var err error +func HandleComponent(ctx *types.PerComponentContext) error { + if ctx.ParentContext.ParentContext.Opts.JourneyReuseComponents && ctx.ParentContext.JourneyRepeatIndex > 0 { + // This is a reused component. We need to get the name from the component from the first journey. 
+ // We must wait until the component's context from the first journey has the name. + firstApplicationCtx := ctx.ParentContext.ParentContext.PerApplicationContexts[ctx.ParentContext.ApplicationIndex] + firstComponentCtx := firstApplicationCtx.PerComponentContexts[ctx.ComponentIndex] + + interval := time.Second * 2 + timeout := time.Minute * 20 - logging.Logger.Debug("Creating component %s in namespace %s", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + if firstComponentCtx.ComponentName != "" { + logging.Logger.Debug("Reused component name is now available: %s", firstComponentCtx.ComponentName) + return true, nil + } + logging.Logger.Trace("Waiting for component name from first component thread") + return false, nil + }, interval, timeout) + + if err != nil { + return logging.Logger.Fail(60, "timed out waiting for component name from first component thread: %v", err) + } + + ctx.ComponentName = firstComponentCtx.ComponentName + logging.Logger.Debug("Reusing component %s in thread %d-%d-%d", ctx.ComponentName, ctx.ParentContext.ParentContext.UserIndex, ctx.ParentContext.ApplicationIndex, ctx.ComponentIndex) + } + + if ctx.ComponentName != "" { + logging.Logger.Debug("Skipping setting up component because reusing component %s in namespace %s, triggering build with push to the repo", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + _, err := doHarmlessCommit(ctx.Framework, ctx.ParentContext.ParentContext.ComponentRepoUrl, ctx.ParentContext.ParentContext.Opts.ComponentRepoRevision) + if err != nil { + return logging.Logger.Fail(60, "Commiting to repo for reused component %s in namespace %s failed: %v", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace, err) + } + return nil + } + + if ctx.ParentContext.ParentContext.Opts.SerializeComponentOnboarding { + logging.Logger.Debug("Waiting to create component in namespace %s", 
ctx.ParentContext.ParentContext.Namespace) + ctx.ParentContext.ParentContext.Opts.SerializeComponentOnboardingLock.Lock() + } + + var iface interface{} + var ok bool + var err error + var mergeRequestNumber int // Create component - _, err = logging.Measure( + iface, err = logging.Measure( + ctx, createComponent, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, - ctx.ComponentName, ctx.ParentContext.ParentContext.ComponentRepoUrl, ctx.ParentContext.ParentContext.Opts.ComponentRepoRevision, ctx.ParentContext.ParentContext.Opts.ComponentContainerContext, ctx.ParentContext.ParentContext.Opts.ComponentContainerFile, ctx.ParentContext.ParentContext.Opts.BuildPipelineSelectorBundle, ctx.ParentContext.ApplicationName, + ctx.ComponentIndex, ctx.ParentContext.ParentContext.Opts.PipelineMintmakerDisabled, ) if err != nil { - return logging.Logger.Fail(60, "Component failed creation: %v", err) + return logging.Logger.Fail(61, "Component failed creation: %v", err) + } + + ctx.ComponentName, ok = iface.(string) + if !ok { + return logging.Logger.Fail(62, "Type assertion failed on component name: %+v", iface) + } + + // Validate component build service account created + _, err = logging.Measure( + ctx, + validateComponent, + ctx.Framework, + ctx.ParentContext.ParentContext.Namespace, + ctx.ComponentName, + ) + if err != nil { + return logging.Logger.Fail(63, "Component failed onboarding: %v", err) + } + + if ctx.ParentContext.ParentContext.Opts.SerializeComponentOnboarding { + ctx.ParentContext.ParentContext.Opts.SerializeComponentOnboardingLock.Unlock() + logging.Logger.Debug("Freed lock to create another component after %s in namespace %s", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + } + + // Configure imagePullSecrets needed for component build task images + if len(ctx.ParentContext.ParentContext.Opts.PipelineImagePullSecrets) > 0 { + _, err = logging.Measure( + ctx, + configurePipelineImagePullSecrets, + ctx.Framework, + 
ctx.ParentContext.ParentContext.Namespace, + ctx.ComponentName, + ctx.ParentContext.ParentContext.Opts.PipelineImagePullSecrets, + ) + if err != nil { + return logging.Logger.Fail(64, "Failed to configure pipeline imagePullSecrets: %v", err) + } } - var pullIface interface{} - pullIface, err = logging.Measure( + iface, err = logging.Measure( + ctx, getPaCPullNumber, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, ctx.ComponentName, ) if err != nil { - return logging.Logger.Fail(61, "Component failed validation: %v", err) + return logging.Logger.Fail(65, "Component failed validation: %v", err) } // Get merge request number - var ok bool - ctx.MergeRequestNumber, ok = pullIface.(int) + mergeRequestNumber, ok = iface.(int) if !ok { - return logging.Logger.Fail(62, "Type assertion failed on pull: %+v", pullIface) + return logging.Logger.Fail(66, "Type assertion failed on pull: %+v", iface) } // If this is supposed to be a multi-arch build, we do not care about @@ -336,6 +459,7 @@ func HandleComponent(ctx *PerComponentContext) error { // Skip what we do not care about, merge PR, graft pipeline yamls _, err = logging.Measure( + ctx, utilityRepoTemplatingComponentCleanup, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, @@ -343,28 +467,16 @@ func HandleComponent(ctx *PerComponentContext) error { ctx.ComponentName, ctx.ParentContext.ParentContext.ComponentRepoUrl, ctx.ParentContext.ParentContext.Opts.ComponentRepoRevision, - ctx.MergeRequestNumber, + ctx.ParentContext.ParentContext.Opts.PipelineRepoTemplatingSource, + ctx.ParentContext.ParentContext.Opts.PipelineRepoTemplatingSourceDir, + mergeRequestNumber, placeholders, ) if err != nil { - return logging.Logger.Fail(63, "Repo-templating workflow component cleanup failed: %v", err) + return logging.Logger.Fail(67, "Repo-templating workflow component cleanup failed: %v", err) } } - // Configure imagePullSecrets needed for component build task images - if 
len(ctx.ParentContext.ParentContext.Opts.PipelineImagePullSecrets) > 0 { - _, err = logging.Measure( - configurePipelineImagePullSecrets, - ctx.Framework, - ctx.ParentContext.ParentContext.Namespace, - ctx.ComponentName, - ctx.ParentContext.ParentContext.Opts.PipelineImagePullSecrets, - ) - if err != nil { - return logging.Logger.Fail(64, "Failed to configure pipeline imagePullSecrets: %v", err) - } - } - return nil } diff --git a/tests/load-tests/pkg/journey/handle_integration_test_scenarios.go b/tests/load-tests/pkg/journey/handle_integration_test_scenarios.go index 11a93276fb..28783c6bde 100644 --- a/tests/load-tests/pkg/journey/handle_integration_test_scenarios.go +++ b/tests/load-tests/pkg/journey/handle_integration_test_scenarios.go @@ -2,33 +2,59 @@ package journey import ( "fmt" + "time" logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" + types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" framework "github.com/konflux-ci/e2e-tests/pkg/framework" - util "github.com/devfile/library/v2/pkg/util" + utils "github.com/konflux-ci/e2e-tests/pkg/utils" ) -func createIntegrationTestScenario(f *framework.Framework, namespace, name, appName, scenarioGitURL, scenarioRevision, scenarioPathInRepo string) error { - _, err := f.AsKubeDeveloper.IntegrationController.CreateIntegrationTestScenario(name, appName, namespace, scenarioGitURL, scenarioRevision, scenarioPathInRepo, "", []string{}) +func createIntegrationTestScenario(f *framework.Framework, namespace, appName, scenarioGitURL, scenarioRevision, scenarioPathInRepo string) (string, error) { + interval := time.Second * 10 + timeout := time.Minute * 1 + + name := fmt.Sprintf("%s-its", appName) + logging.Logger.Debug("Creating integration test scenario %s for application %s in namespace %s", name, appName, namespace) + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + _, err = f.AsKubeDeveloper.IntegrationController.CreateIntegrationTestScenario(name, appName, 
namespace, scenarioGitURL, scenarioRevision, scenarioPathInRepo, "", []string{}) + if err != nil { + logging.Logger.Debug("Failed to create the Integration Test Scenario %s in namespace %s: %v", name, namespace, err) + return false, nil + } + + return true, nil + }, interval, timeout) if err != nil { - return fmt.Errorf("Unable to create the Integration Test Scenario %s: %v", name, err) + return "", fmt.Errorf("Unable to create the Integration Test Scenario %s in namespace %s: %v", name, namespace, err) } - return nil + + return name, nil } -func HandleIntegrationTestScenario(ctx *PerApplicationContext) error { - var err error +func HandleIntegrationTestScenario(ctx *types.PerApplicationContext) error { + if ctx.IntegrationTestScenarioName != "" { + logging.Logger.Debug("Skipping integration test scenario creation because reusing integration test scenario %s in namespace %s", ctx.IntegrationTestScenarioName, ctx.ParentContext.Namespace) + return nil + } - name := fmt.Sprintf("%s-its-%s", ctx.ParentContext.Username, util.GenerateRandomString(5)) - logging.Logger.Debug("Creating integration test scenario %s for application %s in namespace %s", name, ctx.ApplicationName, ctx.ParentContext.Namespace) + if ctx.ParentContext.Opts.TestScenarioGitURL == "" { + logging.Logger.Debug("Skipping integration test scenario creation because GIT was not provided") + return nil + } - _, err = logging.Measure( + var iface interface{} + var err error + var ok bool + + iface, err = logging.Measure( + ctx, createIntegrationTestScenario, ctx.Framework, ctx.ParentContext.Namespace, - name, ctx.ApplicationName, ctx.ParentContext.Opts.TestScenarioGitURL, ctx.ParentContext.Opts.TestScenarioRevision, @@ -38,7 +64,10 @@ func HandleIntegrationTestScenario(ctx *PerApplicationContext) error { return logging.Logger.Fail(40, "Integration test scenario failed creation: %v", err) } - ctx.IntegrationTestScenarioName = name + ctx.IntegrationTestScenarioName, ok = iface.(string) + if !ok { + return 
logging.Logger.Fail(41, "Type assertion failed on integration test scenario name: %+v", iface) + } return nil } diff --git a/tests/load-tests/pkg/journey/handle_persistent_volume_claim.go b/tests/load-tests/pkg/journey/handle_persistent_volume_claim.go index 4f6fa1dbb5..d289cea5c2 100644 --- a/tests/load-tests/pkg/journey/handle_persistent_volume_claim.go +++ b/tests/load-tests/pkg/journey/handle_persistent_volume_claim.go @@ -4,6 +4,7 @@ import "context" import "fmt" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" import framework "github.com/konflux-ci/e2e-tests/pkg/framework" import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,12 +21,12 @@ func collectPersistentVolumeClaims(f *framework.Framework, namespace string) err continue } waittime := (pv.ObjectMeta.CreationTimestamp.Time).Sub(pvc.ObjectMeta.CreationTimestamp.Time) - logging.LogMeasurement("PVC_to_PV_CreationTimestamp", map[string]string{"pv.Name": pv.Name}, waittime, "", nil) + logging.LogMeasurement("PVC_to_PV_CreationTimestamp", -1, -1, -1, -1, map[string]string{"pv.Name": pv.Name}, waittime, "", nil) } return nil } -func HandlePersistentVolumeClaim(ctx *MainContext) error { +func HandlePersistentVolumeClaim(ctx *types.PerUserContext) error { if !ctx.Opts.WaitPipelines { return nil // if build pipeline runs are not done yet, it does not make sense to collect PV timings } diff --git a/tests/load-tests/pkg/journey/handle_pipeline.go b/tests/load-tests/pkg/journey/handle_pipeline.go index 3e96afac46..20b7a05e07 100644 --- a/tests/load-tests/pkg/journey/handle_pipeline.go +++ b/tests/load-tests/pkg/journey/handle_pipeline.go @@ -6,6 +6,7 @@ import ( "time" logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" + types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" framework "github.com/konflux-ci/e2e-tests/pkg/framework" @@ -17,14 +18,17 @@ import ( func 
validatePipelineRunCreation(f *framework.Framework, namespace, appName, compName string) error { interval := time.Second * 20 timeout := time.Minute * 30 + var pr *pipeline.PipelineRun // TODO It would be much better to watch this resource for a condition err := utils.WaitUntilWithInterval(func() (done bool, err error) { - _, err = f.AsKubeDeveloper.HasController.GetComponentPipelineRunWithType(compName, appName, namespace, "build", "", "") + pr, err = f.AsKubeDeveloper.HasController.GetComponentPipelineRunWithType(compName, appName, namespace, "build", "", "") if err != nil { logging.Logger.Debug("Unable to get created PipelineRun for component %s in namespace %s: %v", compName, namespace, err) return false, nil } + + logging.Logger.Debug("Build PipelineRun %s for component %s in namespace %s created", pr.GetName(), compName, namespace) return true, nil }, interval, timeout) @@ -106,16 +110,17 @@ func validatePipelineRunSignature(f *framework.Framework, namespace, appName, co return err } -func HandlePipelineRun(ctx *PerComponentContext) error { +func HandlePipelineRun(ctx *types.PerComponentContext) error { if !ctx.ParentContext.ParentContext.Opts.WaitPipelines { return nil } var err error - logging.Logger.Debug("Creating build pipeline run for component %s in namespace %s", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + logging.Logger.Debug("Waiting for build pipeline run for component %s in namespace %s to be created", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) _, err = logging.Measure( + ctx, validatePipelineRunCreation, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, @@ -126,7 +131,10 @@ func HandlePipelineRun(ctx *PerComponentContext) error { return logging.Logger.Fail(70, "Build Pipeline Run failed creation: %v", err) } + logging.Logger.Debug("Waiting for build pipeline run for component %s in namespace %s to finish", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + _, err = logging.Measure( + 
ctx, validatePipelineRunCondition, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, @@ -137,7 +145,10 @@ func HandlePipelineRun(ctx *PerComponentContext) error { return logging.Logger.Fail(71, "Build Pipeline Run failed run: %v", err) } + logging.Logger.Debug("Waiting for build pipeline run for component %s in namespace %s to be signed", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + _, err = logging.Measure( + ctx, validatePipelineRunSignature, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, @@ -148,5 +159,7 @@ func HandlePipelineRun(ctx *PerComponentContext) error { return logging.Logger.Fail(72, "Build Pipeline Run failed signing: %v", err) } + logging.Logger.Info("Build pipeline run for component %s in namespace %s OK", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + return nil } diff --git a/tests/load-tests/pkg/journey/handle_purge.go b/tests/load-tests/pkg/journey/handle_purge.go index 5a1cf8dda7..7817338f58 100644 --- a/tests/load-tests/pkg/journey/handle_purge.go +++ b/tests/load-tests/pkg/journey/handle_purge.go @@ -22,6 +22,11 @@ func purgeStage(f *framework.Framework, namespace string) error { return fmt.Errorf("Error when deleting components in namespace %s: %v", namespace, err) } + err = f.AsKubeDeveloper.HasController.DeleteAllImageRepositoriesInASpecificNamespace(namespace, time.Minute*5) + if err != nil { + return fmt.Errorf("Error when deleting image repositories in namespace %s: %v", namespace, err) + } + err = f.AsKubeDeveloper.TektonController.DeleteAllPipelineRunsInASpecificNamespace(namespace) if err != nil { return fmt.Errorf("Error when deleting pipeline runs in namespace %s: %v", namespace, err) @@ -47,13 +52,13 @@ func purgeCi(f *framework.Framework, username string) error { } func Purge() error { - if !MainContexts[0].Opts.Purge { + if !PerUserContexts[0].Opts.Purge { return nil } errCounter := 0 - for _, ctx := range MainContexts { + for _, ctx := range PerUserContexts { if 
ctx.Opts.Stage { err := purgeStage(ctx.Framework, ctx.Namespace) if err != nil { diff --git a/tests/load-tests/pkg/journey/handle_releases_run.go b/tests/load-tests/pkg/journey/handle_releases_run.go new file mode 100644 index 0000000000..0fd7c9d743 --- /dev/null +++ b/tests/load-tests/pkg/journey/handle_releases_run.go @@ -0,0 +1,205 @@ +package journey + +import "fmt" +import "strings" +import "time" + +import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" + +import framework "github.com/konflux-ci/e2e-tests/pkg/framework" +import utils "github.com/konflux-ci/e2e-tests/pkg/utils" +import pipeline "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + +// Wait for Release CR to be created +func validateReleaseCreation(f *framework.Framework, namespace, snapshotName string) (string, error) { + logging.Logger.Debug("Waiting for release for snapshot %s in namespace %s to be created", snapshotName, namespace) + + var releaseName string + + interval := time.Second * 10 + timeout := time.Minute * 5 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + release, err := f.AsKubeDeveloper.ReleaseController.GetRelease("", snapshotName, namespace) + if err != nil { + logging.Logger.Debug("Can not get release for snapshot %s in namespace %s: %v\n", snapshotName, namespace, err) + return false, nil + } + + releaseName = release.Name + + return true, nil + }, interval, timeout) + + return releaseName, err +} + + +// Wait for release pipeline run to be created +func validateReleasePipelineRunCreation(f *framework.Framework, namespace, releaseName string) error { + logging.Logger.Debug("Waiting for release pipeline for release %s in namespace %s to be created", releaseName, namespace) + + var pr *pipeline.PipelineRun + + interval := time.Second * 10 + timeout := time.Minute * 5 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + pr, err = 
f.AsKubeDeveloper.ReleaseController.GetPipelineRunInNamespace(namespace, releaseName, namespace) + if err != nil { + logging.Logger.Debug("Pipelinerun for release %s in namespace %s not created yet: %v\n", releaseName, namespace, err) + return false, nil + } + + logging.Logger.Debug("Release PipelineRun %s for release %s in namespace %s created", pr.GetName(), releaseName, namespace) + return true, nil + }, interval, timeout) + + return err +} + + +// Wait for release pipeline run to succeed +func validateReleasePipelineRunCondition(f *framework.Framework, namespace, releaseName string) error { + logging.Logger.Debug("Waiting for release pipeline for release %s in namespace %s to finish", releaseName, namespace) + + interval := time.Second * 10 + timeout := time.Minute * 10 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + pipelineRun, err := f.AsKubeDeveloper.ReleaseController.GetPipelineRunInNamespace(namespace, releaseName, namespace) + if err != nil { + logging.Logger.Debug("PipelineRun for release %s in namespace %s not created yet: %v\n", releaseName, namespace, err) + return false, nil + } + + // Check if there are some conditions + if len(pipelineRun.Status.Conditions) == 0 { + logging.Logger.Debug("PipelineRun %s in namespace %s lacks status conditions\n", pipelineRun.GetName(), pipelineRun.GetNamespace()) + return false, nil + } + + // Check right condition status + for _, condition := range pipelineRun.Status.Conditions { + if (strings.HasPrefix(string(condition.Type), "Error") || strings.HasSuffix(string(condition.Type), "Error")) && condition.Status == "True" { + return false, fmt.Errorf("PipelineRun %s in namespace %s is in error state: %+v", pipelineRun.GetName(), pipelineRun.GetNamespace(), condition) + } + if condition.Type == "Succeeded" && condition.Status == "False" { + return false, fmt.Errorf("PipelineRun %s in namespace %s failed: %+v", pipelineRun.GetName(), pipelineRun.GetNamespace(), condition) + } + if 
condition.Type == "Succeeded" && condition.Status == "True" { + return true, nil + } + } + + return false, nil + }, interval, timeout) + + return err +} + + +// Wait for Release CR to have a succeeding status +func validateReleaseCondition(f *framework.Framework, namespace, releaseName string) error { + logging.Logger.Debug("Waiting for release %s in namespace %s to finish", releaseName, namespace) + + interval := time.Second * 10 + timeout := time.Minute * 5 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + release, err := f.AsKubeDeveloper.ReleaseController.GetRelease(releaseName, "", namespace) + if err != nil { + logging.Logger.Debug("Can not get release %s in namespace %s: %v\n", releaseName, namespace, err) + return false, nil + } + + // Check if there are some conditions + if len(release.Status.Conditions) == 0 { + logging.Logger.Debug("Release %s in namespace %s lacks status conditions\n", releaseName, namespace) + return false, nil + } + + // Check right condition status + for _, condition := range release.Status.Conditions { + if condition.Type == "Released" && condition.Reason == "Progressing" { + return false, nil + } + if condition.Type == "Released" && condition.Status == "False" { + return false, fmt.Errorf("Release %s in namespace %s failed: %+v", releaseName, namespace, condition) + } + if condition.Type == "Released" && condition.Status == "True" { + return true, nil + } + } + + return false, nil + }, interval, timeout) + + return err +} + + +func HandleReleaseRun(ctx *types.PerComponentContext) error { + if ctx.ParentContext.ParentContext.Opts.ReleasePolicy == "" || !ctx.ParentContext.ParentContext.Opts.WaitRelease { + logging.Logger.Info("Skipping waiting for releases because policy was not provided or waiting was disabled") + return nil + } + + var iface interface{} + var ok bool + var err error + + iface, err = logging.Measure( + ctx, + validateReleaseCreation, + ctx.Framework, + 
ctx.ParentContext.ParentContext.Namespace, + ctx.SnapshotName, + ) + if err != nil { + return logging.Logger.Fail(90, "Release failed creation: %v", err) + } + + ctx.ReleaseName, ok = iface.(string) + if !ok { + return logging.Logger.Fail(91, "Type assertion failed on release name: %+v", iface) + } + + _, err = logging.Measure( + ctx, + validateReleasePipelineRunCreation, + ctx.Framework, + ctx.ParentContext.ParentContext.Namespace, + ctx.ReleaseName, + ) + if err != nil { + return logging.Logger.Fail(92, "Release pipeline run failed creation: %v", err) + } + + _, err = logging.Measure( + ctx, + validateReleasePipelineRunCondition, + ctx.Framework, + ctx.ParentContext.ParentContext.Namespace, + ctx.ReleaseName, + ) + if err != nil { + return logging.Logger.Fail(93, "Release pipeline run failed: %v", err) + } + + _, err = logging.Measure( + ctx, + validateReleaseCondition, + ctx.Framework, + ctx.ParentContext.ParentContext.Namespace, + ctx.ReleaseName, + ) + if err != nil { + return logging.Logger.Fail(94, "Release failed: %v", err) + } + + logging.Logger.Info("Release %s in namespace %s succeeded", ctx.ReleaseName, ctx.ParentContext.ParentContext.Namespace) + + return nil +} diff --git a/tests/load-tests/pkg/journey/handle_releases_setup.go b/tests/load-tests/pkg/journey/handle_releases_setup.go new file mode 100644 index 0000000000..215f760abf --- /dev/null +++ b/tests/load-tests/pkg/journey/handle_releases_setup.go @@ -0,0 +1,213 @@ +package journey + +import "fmt" +import "time" + +import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" + +import framework "github.com/konflux-ci/e2e-tests/pkg/framework" +import meta "k8s.io/apimachinery/pkg/api/meta" +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import releaseApi "github.com/konflux-ci/release-service/api/v1alpha1" +import tektonutils "github.com/konflux-ci/release-service/tekton/utils" +import utils 
"github.com/konflux-ci/e2e-tests/pkg/utils" + +// Create ReleasePlan CR +func createReleasePlan(f *framework.Framework, namespace, appName string) (string, error) { + name := appName + "-rp" + logging.Logger.Debug("Creating release plan %s in namespace %s", name, namespace) + + _, err := f.AsKubeDeveloper.ReleaseController.CreateReleasePlan(name, namespace, appName, namespace, "true", nil, nil, nil) + if err != nil { + return "", fmt.Errorf("Unable to create the ReleasePlan %s in %s: %v", name, namespace, err) + } + + return name, nil +} + + +// Create ReleasePlanAdmission CR +// Assumes enterprise contract policy and service account with required permissions is already there +func createReleasePlanAdmission(f *framework.Framework, namespace, appName, policyName, releasePipelineSAName, releasePipelineUrl, releasePipelineRevision, releasePipelinePath string) (string, error) { + name := appName + "-rpa" + logging.Logger.Debug("Creating release plan admission %s in namespace %s with policy %s and pipeline SA %s", name, namespace, policyName, releasePipelineSAName) + + pipeline := &tektonutils.PipelineRef{ + Resolver: "git", + Params: []tektonutils.Param{ + {Name: "url", Value: releasePipelineUrl}, + {Name: "revision", Value: releasePipelineRevision}, + {Name: "pathInRepo", Value: releasePipelinePath}, + }, + } + // CreateReleasePlanAdmission(name, namespace, environment, origin, policy, serviceAccountName string, applications []string, autoRelease bool, pipelineRef *tektonutils.PipelineRef, data *runtime.RawExtension) + _, err := f.AsKubeDeveloper.ReleaseController.CreateReleasePlanAdmission(name, namespace, "", namespace, policyName, releasePipelineSAName, []string{appName}, true, pipeline, nil) + if err != nil { + return "", fmt.Errorf("Unable to create the ReleasePlanAdmission %s in %s: %v", name, namespace, err) + } + + return name, nil +} + + +// Wait for ReleasePlan CR to be created and to have status "Matched" +func validateReleasePlan(f *framework.Framework, 
namespace, name string) error { + logging.Logger.Debug("Validating release plan %s in namespace %s", name, namespace) + + interval := time.Second * 10 + timeout := time.Minute * 5 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + releasePlan, err := f.AsKubeDeveloper.ReleaseController.GetReleasePlan(name, namespace) + if err != nil { + logging.Logger.Debug("Unable to get ReleasePlan %s in %s: %v\n", name, namespace, err) + return false, nil + } + + condition := meta.FindStatusCondition(releasePlan.Status.Conditions, releaseApi.MatchedConditionType.String()) + if condition == nil { + logging.Logger.Debug("MatchedConditon of %s is still not set\n", releasePlan.Name) + return false, nil + } + // it may need a period of time for the ReleasePlanCR to be reconciled + if condition.Status == metav1.ConditionFalse { + logging.Logger.Debug("MatchedConditon of %s has not reconciled yet\n", releasePlan.Name) + return false, nil + } + if condition.Status != metav1.ConditionTrue { + logging.Logger.Debug("MatchedConditon of %s is not true yet\n", releasePlan.Name) + return false, nil + } + if condition.Reason == releaseApi.MatchedReason.String() { + return true, nil + } + + return false, fmt.Errorf("MatchedConditon of %s incorrect: %v", releasePlan.Name, condition) + }, interval, timeout) + + return err +} + + +// Wait for ReleasePlanAdmission CR to be created and to have status "Matched" +func validateReleasePlanAdmission(f *framework.Framework, namespace, name string) error { + logging.Logger.Debug("Validating release plan admission %s in namespace %s", name, namespace) + + interval := time.Second * 10 + timeout := time.Minute * 5 + + err := utils.WaitUntilWithInterval(func() (done bool, err error) { + releasePlanAdmission, err := f.AsKubeDeveloper.ReleaseController.GetReleasePlanAdmission(name, namespace) + if err != nil { + logging.Logger.Debug("Unable to get ReleasePlanAdmission %s in %s: %v\n", name, namespace, err) + return false, nil + } + + 
condition := meta.FindStatusCondition(releasePlanAdmission.Status.Conditions, releaseApi.MatchedConditionType.String()) + if condition == nil { + logging.Logger.Debug("MatchedConditon of %s is still not set\n", releasePlanAdmission.Name) + return false, nil + } + // it may need a period of time for the ReleasePlanCR to be reconciled + if condition.Status == metav1.ConditionFalse { + logging.Logger.Debug("MatchedConditon of %s has not reconciled yet\n", releasePlanAdmission.Name) + return false, nil + } + if condition.Status != metav1.ConditionTrue { + logging.Logger.Debug("MatchedConditon of %s is not true yet\n", releasePlanAdmission.Name) + return false, nil + } + if condition.Reason == releaseApi.MatchedReason.String() { + return true, nil + } + + return false, fmt.Errorf("MatchedConditon of %s incorrect: %v", releasePlanAdmission.Name, condition) + }, interval, timeout) + + return err +} + + +func HandleReleaseSetup(ctx *types.PerApplicationContext) error { + if ctx.ReleasePlanName != "" { + if ctx.ReleasePlanAdmissionName == "" { + return logging.Logger.Fail(90, "We are supposed to reuse RPA, but it was not configured") + } + logging.Logger.Debug("Skipping setting up releases because reusing release plan %s and release plan admission %s in namespace %s", ctx.ReleasePlanName, ctx.ReleasePlanAdmissionName, ctx.ParentContext.Namespace) + return nil + } + + if ctx.ParentContext.Opts.ReleasePolicy == "" { + logging.Logger.Info("Skipping setting up releases because policy was not provided") + return nil + } + + var iface interface{} + var ok bool + var err error + + iface, err = logging.Measure( + ctx, + createReleasePlan, + ctx.Framework, + ctx.ParentContext.Namespace, + ctx.ApplicationName, + ) + if err != nil { + return logging.Logger.Fail(91, "Release Plan failed creation: %v", err) + } + + ctx.ReleasePlanName, ok = iface.(string) + if !ok { + return logging.Logger.Fail(92, "Type assertion failed on release plan name: %+v", iface) + } + + iface, err = 
logging.Measure( + ctx, + createReleasePlanAdmission, + ctx.Framework, + ctx.ParentContext.Namespace, + ctx.ApplicationName, + ctx.ParentContext.Opts.ReleasePolicy, + ctx.ParentContext.Opts.ReleasePipelineServiceAccount, + ctx.ParentContext.Opts.ReleasePipelineUrl, + ctx.ParentContext.Opts.ReleasePipelineRevision, + ctx.ParentContext.Opts.ReleasePipelinePath, + ) + if err != nil { + return logging.Logger.Fail(93, "Release Plan Admission failed creation: %v", err) + } + + ctx.ReleasePlanAdmissionName, ok = iface.(string) + if !ok { + return logging.Logger.Fail(94, "Type assertion failed on release plan admission name: %+v", iface) + } + + _, err = logging.Measure( + ctx, + validateReleasePlan, + ctx.Framework, + ctx.ParentContext.Namespace, + ctx.ReleasePlanName, + ) + if err != nil { + return logging.Logger.Fail(95, "Release Plan failed validation: %v", err) + } + + _, err = logging.Measure( + ctx, + validateReleasePlanAdmission, + ctx.Framework, + ctx.ParentContext.Namespace, + ctx.ReleasePlanAdmissionName, + ) + if err != nil { + return logging.Logger.Fail(96, "Release Plan Admission failed validation: %v", err) + } + + + logging.Logger.Info("Configured release %s & %s for application %s in namespace %s", ctx.ReleasePlanName, ctx.ReleasePlanAdmissionName, ctx.ApplicationName, ctx.ParentContext.Namespace) + + return nil +} diff --git a/tests/load-tests/pkg/journey/handle_repo_templating.go b/tests/load-tests/pkg/journey/handle_repo_templating.go index 967e42164c..608ac39a7b 100644 --- a/tests/load-tests/pkg/journey/handle_repo_templating.go +++ b/tests/load-tests/pkg/journey/handle_repo_templating.go @@ -3,11 +3,14 @@ package journey import "fmt" import "strings" import "regexp" +import "time" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" import framework "github.com/konflux-ci/e2e-tests/pkg/framework" import github "github.com/google/go-github/v44/github" 
+import utils "github.com/konflux-ci/e2e-tests/pkg/utils" var fileList = []string{"COMPONENT-pull-request.yaml", "COMPONENT-push.yaml"} @@ -19,63 +22,123 @@ func getRepoNameFromRepoUrl(repoUrl string) (string, error) { // repoUrl: https://github.com/abc/nodejs-devfile-sample.git, match[1]: nodejs-devfile-sample // repoUrl: https://github.com/abc/nodejs-devfile-sample/, match[1]: nodejs-devfile-sample // repoUrl: https://github.com/abc/nodejs-devfile-sample, match[1]: nodejs-devfile-sample - // repoUrl: https://gitlab.example.com/abc/nodejs-devfile-sample, match[1]: abc/nodejs-devfile-sample - var regex *regexp.Regexp - if strings.Contains(repoUrl, "gitlab.") { - regex = regexp.MustCompile(`/([^/]+/[^/]+?)(.git)?/?$`) + // repoUrl: https://gitlab.example.com/abc/nodejs-devfile-sample, match[1]: nodejs-devfile-sample + // repoUrl: https://gitlab.example.com/abc/def/nodejs-devfile-sample, match[1]: nodejs-devfile-sample + regex := regexp.MustCompile(`/([^/]+?)(.git)?/?$`) + match := regex.FindStringSubmatch(repoUrl) + if match != nil { + return match[1], nil } else { - regex = regexp.MustCompile(`/([^/]+?)(.git)?/?$`) + return "", fmt.Errorf("Failed to parse repo name out of url %s", repoUrl) } +} + +// Parse repo organization out of repo url +func getRepoOrgFromRepoUrl(repoUrl string) (string, error) { + // Answer taken from https://stackoverflow.com/questions/7124778/how-can-i-match-anything-up-until-this-sequence-of-characters-in-a-regular-exp + // Tested with these input data: + // repoUrl: https://github.com/abc/nodejs-devfile-sample.git/, match[1]: abc + // repoUrl: https://github.com/abc/nodejs-devfile-sample.git, match[1]: abc + // repoUrl: https://github.com/abc/nodejs-devfile-sample/, match[1]: abc + // repoUrl: https://github.com/abc/nodejs-devfile-sample, match[1]: abc + // repoUrl: https://gitlab.example.com/abc/nodejs-devfile-sample, match[1]: abc + // repoUrl: https://gitlab.example.com/abc/def/nodejs-devfile-sample, match[1]: abc/def + regex := 
regexp.MustCompile(`^[^/]+://[^/]+/(.*)/.+(.git)?/?$`) match := regex.FindStringSubmatch(repoUrl) if match != nil { return match[1], nil } else { - return "", fmt.Errorf("Failed to parse repo name out of url %s", repoUrl) + return "", fmt.Errorf("Failed to parse repo org out of url %s", repoUrl) } } -// Template file from '.template/...' to '.tekton/...', expanding placeholders (even in file name) using Github API -// Returns SHA of the commit -func templateRepoFileGithub(f *framework.Framework, repoName, repoRevision, fileName string, placeholders *map[string]string) (string, error) { - var fileResponse *github.RepositoryContent +// Parse repo ID (/) out of repo url +func getRepoIdFromRepoUrl(repoUrl string) (string, error) { + repoOrgName, err := getRepoOrgFromRepoUrl(repoUrl) + if err != nil { + return "", err + } + repoName, err := getRepoNameFromRepoUrl(repoUrl) + if err != nil { + return "", err + } + return repoOrgName + "/" + repoName, nil +} + +// Get file content from repository, no matter if on GitLab or GitHub +func getRepoFileContent(f *framework.Framework, repoUrl, repoRevision, fileName string) (string, error) { var fileContent string - var repoContentResponse *github.RepositoryContentResponse - var err error - fileResponse, err = f.AsKubeAdmin.CommonController.Github.GetFile(repoName, ".template/" + fileName, repoRevision) + repoName, err := getRepoNameFromRepoUrl(repoUrl) if err != nil { return "", err } - - fileContent, err = fileResponse.GetContent() + repoOrgName, err := getRepoOrgFromRepoUrl(repoUrl) if err != nil { return "", err } - for key, value := range *placeholders { - fileContent = strings.ReplaceAll(fileContent, key, value) - fileName = strings.ReplaceAll(fileName, key, value) + if strings.Contains(repoUrl, "gitlab.") { + fileContent, err = f.AsKubeAdmin.CommonController.Gitlab.GetFile(repoOrgName + "/" + repoName, fileName, repoRevision) + if err != nil { + return "", fmt.Errorf("Failed to get file %s from repo %s revision %s: %v", 
fileName, repoOrgName + "/" + repoName, repoRevision, err) + } + } else { + fileResponse, err := f.AsKubeAdmin.CommonController.Github.GetFileWithOrg(repoOrgName, repoName, fileName, repoRevision) + if err != nil { + return "", fmt.Errorf("Failed to get file %s from repo %s revision %s: %v", fileName, repoName, repoRevision, err) + } + + fileContent, err = fileResponse.GetContent() + if err != nil { + return "", err + } } - fileResponse, err = f.AsKubeAdmin.CommonController.Github.GetFile(repoName, ".tekton/" + fileName, repoRevision) + return fileContent, nil +} + +// Update file content in repository, no matter if on GitLab or GitHub +func updateRepoFileContent(f *framework.Framework, repoUrl, repoRevision, fileName, fileContent string) (string, error) { + var commitSha string + + repoName, err := getRepoNameFromRepoUrl(repoUrl) if err != nil { return "", err } - - repoContentResponse, err = f.AsKubeAdmin.CommonController.Github.UpdateFile(repoName, ".tekton/" + fileName, fileContent, repoRevision, *fileResponse.SHA) + repoOrgName, err := getRepoOrgFromRepoUrl(repoUrl) if err != nil { return "", err } - return *repoContentResponse.Commit.SHA, nil + if strings.Contains(repoUrl, "gitlab.") { + commitSha, err = f.AsKubeAdmin.CommonController.Gitlab.UpdateFile(repoOrgName + "/" + repoName, fileName, fileContent, repoRevision) + if err != nil { + return "", fmt.Errorf("Failed to update file %s in repo %s revision %s: %v", fileName, repoOrgName + "/" + repoName, repoRevision, err) + } + } else { + fileResponse, err := f.AsKubeAdmin.CommonController.Github.GetFile(repoName, fileName, repoRevision) + if err != nil { + return "", fmt.Errorf("Failed to get file %s from repo %s revision %s: %v", fileName, repoName, repoRevision, err) + } + + repoContentResponse, err := f.AsKubeAdmin.CommonController.Github.UpdateFile(repoName, fileName, fileContent, repoRevision, *fileResponse.SHA) + if err != nil { + return "", fmt.Errorf("Failed to update file %s in repo %s revision %s: 
%v", fileName, repoName, repoRevision, err) + } + + commitSha = *repoContentResponse.Commit.SHA + } + + return commitSha, nil } -// Template file from '.template/...' to '.tekton/...', expanding placeholders (even in file name) using Gitlab API +// Template file from source repo and dir to '.tekton/...' in component repo, expanding placeholders (even in file name), no matter if on GitLab or GitHub // Returns SHA of the commit -func templateRepoFileGitlab(f *framework.Framework, repoName, repoRevision, fileName string, placeholders *map[string]string) (string, error) { - fileContent, err := f.AsKubeAdmin.CommonController.Gitlab.GetFile(repoName, ".template/" + fileName, repoRevision) +func templateRepoFile(f *framework.Framework, repoUrl, repoRevision, sourceRepo, sourceRepoDir, fileName string, placeholders *map[string]string) (string, error) { + fileContent, err := getRepoFileContent(f, sourceRepo, "main", sourceRepoDir + fileName) if err != nil { - return "", fmt.Errorf("Failed to get file: %v", err) + return "", err } for key, value := range *placeholders { @@ -83,39 +146,50 @@ func templateRepoFileGitlab(f *framework.Framework, repoName, repoRevision, file fileName = strings.ReplaceAll(fileName, key, value) } - commitID, err := f.AsKubeAdmin.CommonController.Gitlab.UpdateFile(repoName, ".tekton/" + fileName, fileContent, repoRevision) + commitSha, err := updateRepoFileContent(f, repoUrl, repoRevision, ".tekton/" + fileName, fileContent) if err != nil { - return "", fmt.Errorf("Failed to update file: %v", err) + return "", err } - logging.Logger.Info("Templated file %s with commit %s", fileName, commitID) - return commitID, nil + return commitSha, nil } // Fork repository and return forked repo URL -func ForkRepo(f *framework.Framework, repoUrl, repoRevision, username string) (string, error) { +func ForkRepo(f *framework.Framework, repoUrl, repoRevision, suffix, targetOrgName string) (string, error) { // For PaC testing, let's template repo and return forked 
repo name var forkRepo *github.Repository var sourceName string + var sourceOrgName string var targetName string var err error - // Parse just repo name out of input repo url and construct target repo name + // Parse just repo name and org out of input repo url and construct target repo name sourceName, err = getRepoNameFromRepoUrl(repoUrl) if err != nil { return "", err } - targetName = fmt.Sprintf("%s-%s", sourceName, username) + sourceOrgName, err = getRepoOrgFromRepoUrl(repoUrl) + if err != nil { + return "", err + } + + targetName = fmt.Sprintf("%s-%s", sourceName, suffix) if strings.Contains(repoUrl, "gitlab.") { - logging.Logger.Debug("Forking Gitlab repository %s", repoUrl) + // Cleanup if it already exists + err = f.AsKubeAdmin.CommonController.Gitlab.DeleteRepositoryIfExists(targetOrgName + "/" + targetName) + if err != nil { + return "", err + } - logging.Logger.Warning("Forking Gitlab repository not implemented yet, this will only work with 1 concurrent user") // TODO + // Create fork and make sure it appears + forkedRepoURL, err := f.AsKubeAdmin.CommonController.Gitlab.ForkRepository(sourceOrgName, sourceName, targetOrgName, targetName) + if err != nil { + return "", err + } - return repoUrl, nil + return forkedRepoURL.WebURL, nil } else { - logging.Logger.Debug("Forking Github repository %s", repoUrl) - // Cleanup if it already exists err = f.AsKubeAdmin.CommonController.Github.DeleteRepositoryIfExists(targetName) if err != nil { @@ -123,7 +197,14 @@ func ForkRepo(f *framework.Framework, repoUrl, repoRevision, username string) (s } // Create fork and make sure it appears - forkRepo, err = f.AsKubeAdmin.CommonController.Github.ForkRepository(sourceName, targetName) + err = utils.WaitUntilWithInterval(func() (done bool, err error) { + forkRepo, err = f.AsKubeAdmin.CommonController.Github.ForkRepositoryWithOrgs(sourceOrgName, sourceName, targetOrgName, targetName) + if err != nil { + logging.Logger.Debug("Repo forking failed, trying again: %v", err) + 
return false, nil + } + return true, nil + }, time.Second * 20, time.Minute * 10) if err != nil { return "", err } @@ -133,45 +214,112 @@ func ForkRepo(f *framework.Framework, repoUrl, repoRevision, username string) (s } // Template PaC files -func templateFiles(f *framework.Framework, repoUrl, repoRevision string, placeholders *map[string]string) (*map[string]string, error) { - var sha string - - // Get repo name from repo url - repoName, err := getRepoNameFromRepoUrl(repoUrl) - if err != nil { - return nil, err - } - +func templateFiles(f *framework.Framework, repoUrl, repoRevision, sourceRepo, sourceRepoDir string, placeholders *map[string]string) (*map[string]string, error) { // Template files we care about shaMap := &map[string]string{} for _, file := range fileList { - if strings.Contains(repoUrl, "gitlab.") { - sha, err = templateRepoFileGitlab(f, repoName, repoRevision, file, placeholders) - } else { - sha, err = templateRepoFileGithub(f, repoName, repoRevision, file, placeholders) - } + sha, err := templateRepoFile(f, repoUrl, repoRevision, sourceRepo, sourceRepoDir, file, placeholders) if err != nil { return nil, err } + logging.Logger.Debug("Templated file %s with commit %s", file, sha) (*shaMap)[file] = sha } return shaMap, nil } -func HandleRepoForking(ctx *MainContext) error { - logging.Logger.Debug("Forking repository %s for user %s", ctx.Opts.ComponentRepoUrl, ctx.Username) +// doHarmlessCommit creates or updates file "just-trigger-build" with current timestamp and commits it +func doHarmlessCommit(f *framework.Framework, repoUrl, repoRevision string) (string, error) { + fileName := "just-trigger-build" + var fileContent string + var sha *string + var commitSha string + + repoName, err := getRepoNameFromRepoUrl(repoUrl) + if err != nil { + return "", err + } + repoOrgName, err := getRepoOrgFromRepoUrl(repoUrl) + if err != nil { + return "", err + } + + if strings.Contains(repoUrl, "gitlab.") { + // For gitlab, we can get file content. 
If it fails, we assume it doesn't exist. + // The UpdateFile API for gitlab creates the file if it doesn't exist. + existingContent, err := f.AsKubeAdmin.CommonController.Gitlab.GetFile(repoOrgName+"/"+repoName, fileName, repoRevision) + if err != nil { + logging.Logger.Debug("Failed to get file %s from repo %s, assuming it does not exist: %v", fileName, repoUrl, err) + fileContent = "" + } else { + fileContent = existingContent + } + fileContent += fmt.Sprintf("\n# %s", time.Now().String()) + + commitSha, err = f.AsKubeAdmin.CommonController.Gitlab.UpdateFile(repoOrgName+"/"+repoName, fileName, fileContent, repoRevision) + if err != nil { + return "", fmt.Errorf("Failed to update file %s in repo %s revision %s: %v", fileName, repoOrgName+"/"+repoName, repoRevision, err) + } + } else { + // For github, we need to get SHA if file exists. + fileResponse, err := f.AsKubeAdmin.CommonController.Github.GetFile(repoName, fileName, repoRevision) + if err != nil { + // Assuming error means not found. 
+ logging.Logger.Debug("File %s not found in repo %s, will create it.", fileName, repoUrl) + fileContent = "" + sha = nil + } else { + existingContent, err := fileResponse.GetContent() + if err != nil { + return "", err + } + fileContent = existingContent + sha = fileResponse.SHA + } + + fileContent += fmt.Sprintf("\n# %s", time.Now().String()) + + if sha == nil { + // We have to assume a CreateFile function exists in the framework's github controller + repoContentResponse, err := f.AsKubeAdmin.CommonController.Github.CreateFile(repoName, fileName, fileContent, repoRevision) + if err != nil { + return "", fmt.Errorf("Failed to create file %s in repo %s: %v", fileName, repoUrl, err) + } + commitSha = *repoContentResponse.Commit.SHA + } else { + repoContentResponse, err := f.AsKubeAdmin.CommonController.Github.UpdateFile(repoName, fileName, fileContent, repoRevision, *sha) + if err != nil { + return "", fmt.Errorf("Failed to update file %s in repo %s: %v", fileName, repoUrl, err) + } + commitSha = *repoContentResponse.Commit.SHA + } + } + return commitSha, nil +} + +func HandleRepoForking(ctx *types.PerUserContext) error { + var suffix string + if ctx.Opts.Stage { + suffix = ctx.Opts.RunPrefix + "-" + ctx.Namespace + } else { + suffix = ctx.Namespace + } + logging.Logger.Debug("Forking repository %s with suffix %s to %s", ctx.Opts.ComponentRepoUrl, suffix, ctx.Opts.ForkTarget) forkUrl, err := ForkRepo( ctx.Framework, ctx.Opts.ComponentRepoUrl, ctx.Opts.ComponentRepoRevision, - ctx.Username, + suffix, + ctx.Opts.ForkTarget, ) if err != nil { return logging.Logger.Fail(80, "Repo forking failed: %v", err) } + logging.Logger.Info("Forked %s to %s", ctx.Opts.ComponentRepoUrl, forkUrl) + ctx.ComponentRepoUrl = forkUrl return nil diff --git a/tests/load-tests/pkg/journey/handle_test_run.go b/tests/load-tests/pkg/journey/handle_test_run.go index 1b5d71f87f..3f280b7389 100644 --- a/tests/load-tests/pkg/journey/handle_test_run.go +++ 
b/tests/load-tests/pkg/journey/handle_test_run.go @@ -5,6 +5,7 @@ import "strings" import "time" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" import appstudioApi "github.com/konflux-ci/application-api/api/v1alpha1" import framework "github.com/konflux-ci/e2e-tests/pkg/framework" @@ -12,8 +13,10 @@ import utils "github.com/konflux-ci/e2e-tests/pkg/utils" import pipeline "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" func validateSnapshotCreation(f *framework.Framework, namespace, compName string) (string, error) { + logging.Logger.Debug("Waiting for snapshot for component %s in namespace %s to be created", compName, namespace) + interval := time.Second * 20 - timeout := time.Minute * 30 + timeout := time.Minute * 5 var snap *appstudioApi.Snapshot // TODO It would be much better to watch this resource for a condition @@ -26,20 +29,29 @@ func validateSnapshotCreation(f *framework.Framework, namespace, compName string return true, nil }, interval, timeout) + if err != nil { + return "", err + } + return snap.Name, err } func validateTestPipelineRunCreation(f *framework.Framework, namespace, itsName, snapName string) error { + logging.Logger.Debug("Waiting for test pipeline run for ITS %s and snapshot %s in namespace %s to be created", itsName, snapName, namespace) + interval := time.Second * 20 - timeout := time.Minute * 30 + timeout := time.Minute * 5 + var pr *pipeline.PipelineRun // TODO It would be much better to watch this resource for a condition err := utils.WaitUntilWithInterval(func() (done bool, err error) { - _, err = f.AsKubeDeveloper.IntegrationController.GetIntegrationPipelineRun(itsName, snapName, namespace) + pr, err = f.AsKubeDeveloper.IntegrationController.GetIntegrationPipelineRun(itsName, snapName, namespace) if err != nil { logging.Logger.Debug("Unable to get created test PipelineRun for integration test pipeline %s in namespace %s: %v", 
itsName, namespace, err) return false, nil } + + logging.Logger.Debug("Test PipelineRun %s for its %s and snap %s in namespace %s created", pr.GetName(), itsName, snapName, namespace) return true, nil }, interval, timeout) @@ -47,8 +59,10 @@ func validateTestPipelineRunCreation(f *framework.Framework, namespace, itsName, } func validateTestPipelineRunCondition(f *framework.Framework, namespace, itsName, snapName string) error { + logging.Logger.Debug("Waiting for test pipeline run for ITS %s and snapshot %s in namespace %s to finish", itsName, snapName, namespace) + interval := time.Second * 20 - timeout := time.Minute * 60 + timeout := time.Minute * 10 var pr *pipeline.PipelineRun // TODO It would be much better to watch this resource for a condition @@ -82,7 +96,7 @@ func validateTestPipelineRunCondition(f *framework.Framework, namespace, itsName return err } -func HandleTest(ctx *PerComponentContext) error { +func HandleTest(ctx *types.PerComponentContext) error { if !ctx.ParentContext.ParentContext.Opts.WaitPipelines || !ctx.ParentContext.ParentContext.Opts.WaitIntegrationTestsPipelines { return nil } @@ -90,9 +104,8 @@ func HandleTest(ctx *PerComponentContext) error { var err error var ok bool - logging.Logger.Debug("Creating test pipeline run for component %s in namespace %s", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) - result1, err1 := logging.Measure( + ctx, validateSnapshotCreation, ctx.Framework, ctx.ParentContext.ParentContext.Namespace, @@ -106,27 +119,39 @@ func HandleTest(ctx *PerComponentContext) error { return logging.Logger.Fail(81, "Snapshot name type assertion failed") } - _, err = logging.Measure( - validateTestPipelineRunCreation, - ctx.Framework, - ctx.ParentContext.ParentContext.Namespace, - ctx.ParentContext.IntegrationTestScenarioName, - ctx.SnapshotName, - ) - if err != nil { - return logging.Logger.Fail(82, "Test Pipeline Run failed creation: %v", err) - } + if ctx.ParentContext.ParentContext.Opts.TestScenarioGitURL == 
"" { + logging.Logger.Debug("Integration Test Scenario GIT not provided, not waiting for it") + } else { + logging.Logger.Debug("Waiting for test pipeline run for component %s in namespace %s to be created", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + + _, err = logging.Measure( + ctx, + validateTestPipelineRunCreation, + ctx.Framework, + ctx.ParentContext.ParentContext.Namespace, + ctx.ParentContext.IntegrationTestScenarioName, + ctx.SnapshotName, + ) + if err != nil { + return logging.Logger.Fail(82, "Test Pipeline Run failed creation: %v", err) + } - _, err = logging.Measure( - validateTestPipelineRunCondition, - ctx.Framework, - ctx.ParentContext.ParentContext.Namespace, - ctx.ParentContext.IntegrationTestScenarioName, - ctx.SnapshotName, - ) - if err != nil { - return logging.Logger.Fail(83, "Test Pipeline Run failed run: %v", err) + logging.Logger.Debug("Waiting for test pipeline run for component %s in namespace %s to finish", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + + _, err = logging.Measure( + ctx, + validateTestPipelineRunCondition, + ctx.Framework, + ctx.ParentContext.ParentContext.Namespace, + ctx.ParentContext.IntegrationTestScenarioName, + ctx.SnapshotName, + ) + if err != nil { + return logging.Logger.Fail(83, "Test Pipeline Run failed run: %v", err) + } } + logging.Logger.Info("Integration Test Scenario for componet %s in namespace %s OK", ctx.ComponentName, ctx.ParentContext.ParentContext.Namespace) + return nil } diff --git a/tests/load-tests/pkg/journey/handle_users.go b/tests/load-tests/pkg/journey/handle_users.go index c580a8cae0..6f564cfeef 100644 --- a/tests/load-tests/pkg/journey/handle_users.go +++ b/tests/load-tests/pkg/journey/handle_users.go @@ -2,86 +2,89 @@ package journey import "fmt" import "time" +import "strings" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" +import loadtestutils 
"github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/loadtestutils" import "github.com/konflux-ci/e2e-tests/pkg/framework" import "github.com/konflux-ci/e2e-tests/pkg/utils" -func HandleUser(ctx *MainContext) error { +// Returns framework, namespace (and error) +func provisionFramework(stageUsers []loadtestutils.User, threadIndex int, username string, isStage bool) (*framework.Framework, string, error) { + var f *framework.Framework var err error - // TODO E.g. when token is incorrect, timeout does not work as expected - if ctx.Opts.Stage { - user := (*ctx.StageUsers)[ctx.ThreadIndex] - ctx.Username = user.Username - ctx.Framework, err = framework.NewFrameworkWithTimeout( - ctx.Username, + if isStage { + user := stageUsers[threadIndex] + f, err = framework.NewFrameworkWithTimeout( + username, time.Minute*60, utils.Options{ - ToolchainApiUrl: user.APIURL, - KeycloakUrl: user.SSOURL, - OfflineToken: user.Token, + ApiUrl: user.APIURL, + Token: user.Token, }) } else { - ctx.Username = fmt.Sprintf("%s-%04d", ctx.Opts.UsernamePrefix, ctx.ThreadIndex) - ctx.Framework, err = framework.NewFrameworkWithTimeout(ctx.Username, time.Minute*60) + f, err = framework.NewFrameworkWithTimeout(username, time.Minute*60) } if err != nil { - return logging.Logger.Fail(10, "Unable to provision user %s: %v", ctx.Username, err) + return nil, "", err } - ctx.Namespace = ctx.Framework.UserNamespace - - return nil + return f, f.UserNamespace, nil } -func HandleNewFrameworkForComp(ctx *PerComponentContext) error { +func HandleUser(ctx *types.PerUserContext) error { var err error - // TODO This framework generation code is duplicate to above - if ctx.ParentContext.ParentContext.Opts.Stage { - user := (*ctx.ParentContext.ParentContext.StageUsers)[ctx.ParentContext.ParentContext.ThreadIndex] - ctx.Framework, err = framework.NewFrameworkWithTimeout( - ctx.ParentContext.ParentContext.Username, - time.Minute*60, - utils.Options{ - ToolchainApiUrl: user.APIURL, - KeycloakUrl: user.SSOURL, - 
OfflineToken: user.Token, - }) + if ctx.Opts.Stage { + ctx.Username = strings.TrimSuffix((*ctx.StageUsers)[ctx.UserIndex].Namespace, "-tenant") } else { - ctx.Framework, err = framework.NewFrameworkWithTimeout(ctx.ParentContext.ParentContext.Username, time.Minute*60) + ctx.Username = fmt.Sprintf("%s-%04d", ctx.Opts.RunPrefix, ctx.UserIndex) } + ctx.Framework, ctx.Namespace, err = provisionFramework( + *ctx.StageUsers, + ctx.UserIndex, + ctx.Username, + ctx.Opts.Stage, + ) if err != nil { - return logging.Logger.Fail(11, "Unable to provision framework for user %s: %v", ctx.ParentContext.ParentContext.Username, err) + return logging.Logger.Fail(10, "Unable to provision user %s: %v", ctx.Username, err) } return nil } -func HandleNewFrameworkForApp(ctx *PerApplicationContext) error { +func HandleNewFrameworkForApp(ctx *types.PerApplicationContext) error { var err error - // TODO This framework generation code is duplicate to above - if ctx.ParentContext.Opts.Stage { - user := (*ctx.ParentContext.StageUsers)[ctx.ParentContext.ThreadIndex] - ctx.Framework, err = framework.NewFrameworkWithTimeout( - ctx.ParentContext.Username, - time.Minute*60, - utils.Options{ - ToolchainApiUrl: user.APIURL, - KeycloakUrl: user.SSOURL, - OfflineToken: user.Token, - }) - } else { - ctx.Framework, err = framework.NewFrameworkWithTimeout(ctx.ParentContext.Username, time.Minute*60) + ctx.Framework, _, err = provisionFramework( + *ctx.ParentContext.StageUsers, + ctx.ParentContext.UserIndex, + ctx.ParentContext.Username, + ctx.ParentContext.Opts.Stage, + ) + if err != nil { + return logging.Logger.Fail(11, "Unable to provision framework for user %s: %v", ctx.ParentContext.Username, err) } + return nil +} + +func HandleNewFrameworkForComp(ctx *types.PerComponentContext) error { + var err error + + ctx.Framework, _, err = provisionFramework( + *ctx.ParentContext.ParentContext.StageUsers, + ctx.ParentContext.ParentContext.UserIndex, + ctx.ParentContext.ParentContext.Username, + 
ctx.ParentContext.ParentContext.Opts.Stage, + ) if err != nil { - return logging.Logger.Fail(12, "Unable to provision framework for user %s: %v", ctx.ParentContext.Username, err) + return logging.Logger.Fail(12, "Unable to provision framework for user %s: %v", ctx.ParentContext.ParentContext.Username, err) } return nil diff --git a/tests/load-tests/pkg/journey/journey.go b/tests/load-tests/pkg/journey/journey.go index 288ac24bd5..ebf6260680 100644 --- a/tests/load-tests/pkg/journey/journey.go +++ b/tests/load-tests/pkg/journey/journey.go @@ -1,51 +1,53 @@ package journey -import "fmt" import "sync" +import "time" +import "math/rand" import options "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/options" import logging "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/logging" import loadtestutils "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/loadtestutils" - -import framework "github.com/konflux-ci/e2e-tests/pkg/framework" -import util "github.com/devfile/library/v2/pkg/util" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" // Pointers to all user journey thread contexts -var MainContexts []*MainContext - -// Struct to hold user journey thread data -type MainContext struct { - ThreadsWG *sync.WaitGroup - ThreadIndex int - JourneyRepeatsCounter int - Opts *options.Opts - StageUsers *[]loadtestutils.User - Framework *framework.Framework - Username string - Namespace string - ComponentRepoUrl string // overrides same value from Opts, needed when templating repos - PerApplicationContexts []*PerApplicationContext -} +var PerUserContexts []*types.PerUserContext // Just to create user -func initUserThread(threadCtx *MainContext) { - defer threadCtx.ThreadsWG.Done() +func initUserThread(perUserCtx *types.PerUserContext) { + defer perUserCtx.PerUserWG.Done() var err error // Create user if needed - _, err = logging.Measure(HandleUser, threadCtx) + _, err = logging.Measure( + perUserCtx, + HandleUser, + perUserCtx, + ) if err != 
nil { logging.Logger.Error("Thread failed: %v", err) return } } +// Helper function to compute duration to delay startup of some threads based on StartupDelay and StartupJitter command-line options +// If this is a first thread, delay will be skipped as it would not help +func computeStartupPause(index int, delay, jitter time.Duration) time.Duration { + if index == 0 || delay == 0 { + return time.Duration(0) + } else { + // For delay = 10s and jitter = 3s, this computes random number from 8.5 to 11.5 seconds + jitterSec := rand.Float64() * jitter.Seconds() - jitter.Seconds() / 2 + jitterDur := time.Duration(jitterSec) * time.Second + return delay + jitterDur + } +} + // Start all the user journey threads // TODO split this to two functions and get PurgeOnly code out -func Setup(fn func(*MainContext), opts *options.Opts) (string, error) { - threadsWG := &sync.WaitGroup{} - threadsWG.Add(opts.Concurrency) +func PerUserSetup(fn func(*types.PerUserContext), opts *options.Opts) (string, error) { + perUserWG := &sync.WaitGroup{} + perUserWG.Add(opts.Concurrency) var stageUsers []loadtestutils.User var err error @@ -57,27 +59,30 @@ func Setup(fn func(*MainContext), opts *options.Opts) (string, error) { } // Initialize all user thread contexts - for threadIndex := 0; threadIndex < opts.Concurrency; threadIndex++ { - logging.Logger.Info("Initiating thread %d", threadIndex) + for userIndex := 0; userIndex < opts.Concurrency; userIndex++ { + startupPause := computeStartupPause(userIndex, opts.StartupDelay, opts.StartupJitter) + + logging.Logger.Info("Initiating per user thread %d with pause %v", userIndex, startupPause) - threadCtx := &MainContext{ - ThreadsWG: threadsWG, - ThreadIndex: threadIndex, + perUserCtx := &types.PerUserContext{ + PerUserWG: perUserWG, + UserIndex: userIndex, + StartupPause: startupPause, Opts: opts, StageUsers: &stageUsers, Username: "", Namespace: "", } - MainContexts = append(MainContexts, threadCtx) + PerUserContexts = append(PerUserContexts, 
perUserCtx) } // Create all users (if necessary) and initialize their frameworks - for _, threadCtx := range MainContexts { - go initUserThread(threadCtx) + for _, perUserCtx := range PerUserContexts { + go initUserThread(perUserCtx) } - threadsWG.Wait() + perUserWG.Wait() // If we are supposed to only purge resources, now when frameworks are initialized, we are done if opts.PurgeOnly { @@ -86,49 +91,49 @@ func Setup(fn func(*MainContext), opts *options.Opts) (string, error) { } // Fork repositories sequentially as GitHub do not allow more than 3 running forks in parallel anyway - for _, threadCtx := range MainContexts { - _, err = logging.Measure(HandleRepoForking, threadCtx) + for _, perUserCtx := range PerUserContexts { + _, err = logging.Measure( + perUserCtx, + HandleRepoForking, + perUserCtx, + ) if err != nil { return "", err } } - threadsWG.Add(opts.Concurrency) + perUserWG.Add(opts.Concurrency) // Run actual user thread function - for _, threadCtx := range MainContexts { - go fn(threadCtx) + for _, perUserCtx := range PerUserContexts { + go fn(perUserCtx) } - threadsWG.Wait() + perUserWG.Wait() return "", nil } -// Struct to hold data for thread to process each application -type PerApplicationContext struct { - PerApplicationWG *sync.WaitGroup - ApplicationIndex int - Framework *framework.Framework - ParentContext *MainContext - ApplicationName string - IntegrationTestScenarioName string - PerComponentContexts []*PerComponentContext -} - // Start all the threads to process all applications per user -func PerApplicationSetup(fn func(*PerApplicationContext), parentContext *MainContext) (string, error) { +func PerApplicationSetup(fn func(*types.PerApplicationContext), parentContext *types.PerUserContext) (string, error) { perApplicationWG := &sync.WaitGroup{} perApplicationWG.Add(parentContext.Opts.ApplicationsCount) for applicationIndex := 0; applicationIndex < parentContext.Opts.ApplicationsCount; applicationIndex++ { - logging.Logger.Info("Initiating per 
application thread %d-%d", parentContext.ThreadIndex, applicationIndex) - - perApplicationCtx := &PerApplicationContext{ - PerApplicationWG: perApplicationWG, - ApplicationIndex: applicationIndex, - ParentContext: parentContext, - ApplicationName: fmt.Sprintf("%s-app-%s", parentContext.Username, util.GenerateRandomString(5)), + startupPause := computeStartupPause(applicationIndex, parentContext.Opts.StartupDelay, parentContext.Opts.StartupJitter) + + logging.Logger.Info("Initiating per application thread %d-%d(%d) with pause %v", parentContext.UserIndex, applicationIndex, parentContext.JourneyRepeatsCounter, startupPause) + + perApplicationCtx := &types.PerApplicationContext{ + PerApplicationWG: perApplicationWG, + ApplicationIndex: applicationIndex, + JourneyRepeatIndex: parentContext.JourneyRepeatsCounter, + StartupPause: startupPause, + ParentContext: parentContext, + ApplicationName: "", + IntegrationTestScenarioName: "", + ReleasePlanName: "", + ReleasePlanAdmissionName: "", } parentContext.PerApplicationContexts = append(parentContext.PerApplicationContexts, perApplicationCtx) @@ -141,30 +146,22 @@ func PerApplicationSetup(fn func(*PerApplicationContext), parentContext *MainCon return "", nil } -// Struct to hold data for thread to process each component -type PerComponentContext struct { - PerComponentWG *sync.WaitGroup - ComponentIndex int - Framework *framework.Framework - ParentContext *PerApplicationContext - ComponentName string - SnapshotName string - MergeRequestNumber int -} - // Start all the threads to process all components per application -func PerComponentSetup(fn func(*PerComponentContext), parentContext *PerApplicationContext) (string, error) { +func PerComponentSetup(fn func(*types.PerComponentContext), parentContext *types.PerApplicationContext) (string, error) { perComponentWG := &sync.WaitGroup{} perComponentWG.Add(parentContext.ParentContext.Opts.ComponentsCount) for componentIndex := 0; componentIndex < 
parentContext.ParentContext.Opts.ComponentsCount; componentIndex++ { - logging.Logger.Info("Initiating per component thread %d-%d-%d", parentContext.ParentContext.ThreadIndex, parentContext.ApplicationIndex, componentIndex) + startupPause := computeStartupPause(componentIndex, parentContext.ParentContext.Opts.StartupDelay, parentContext.ParentContext.Opts.StartupJitter) + + logging.Logger.Info("Initiating per component thread %d-%d(%d)-%d with pause %s", parentContext.ParentContext.UserIndex, parentContext.ApplicationIndex, parentContext.JourneyRepeatIndex, componentIndex, startupPause) - perComponentCtx := &PerComponentContext{ + perComponentCtx := &types.PerComponentContext{ PerComponentWG: perComponentWG, ComponentIndex: componentIndex, + StartupPause: startupPause, ParentContext: parentContext, - ComponentName: fmt.Sprintf("%s-comp-%d", parentContext.ApplicationName, componentIndex), + ComponentName: "", } parentContext.PerComponentContexts = append(parentContext.PerComponentContexts, perComponentCtx) diff --git a/tests/load-tests/pkg/journey/journey_test.go b/tests/load-tests/pkg/journey/journey_test.go new file mode 100644 index 0000000000..41178244ef --- /dev/null +++ b/tests/load-tests/pkg/journey/journey_test.go @@ -0,0 +1,92 @@ +package journey + +import "testing" + +// Test basic input and output combinations for getRepoNameFromRepoUrl. 
+func Test_getRepoNameFromRepoUrl(t *testing.T) { + repoName := "nodejs-devfile-sample" + repoUrls := []string{ + "https://github.com/abc/nodejs-devfile-sample.git/", + "https://github.com/abc/nodejs-devfile-sample.git", + "https://github.com/abc/nodejs-devfile-sample/", + "https://github.com/abc/nodejs-devfile-sample", + "https://gitlab.example.com/abc/nodejs-devfile-sample", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample.git", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample.git/", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample/", + } + for _, repoUrl := range repoUrls { + out, err := getRepoNameFromRepoUrl(repoUrl) + if err != nil || out != repoName { + t.Errorf("Failed getting '%s' from '%s': %v", repoName, repoUrl, err) + } + } +} + +// Test basic input and output combinations for getRepoOrgFromRepoUrl. +func Test_getRepoOrgFromRepoUrl(t *testing.T) { + repoName := "abc" + repoUrls := []string{ + "https://github.com/abc/nodejs-devfile-sample.git/", + "https://github.com/abc/nodejs-devfile-sample.git", + "https://github.com/abc/nodejs-devfile-sample/", + "https://github.com/abc/nodejs-devfile-sample", + "https://gitlab.example.com/abc/nodejs-devfile-sample", + "https://gitlab.example.com/abc/nodejs-devfile-sample.git", + "https://gitlab.example.com/abc/nodejs-devfile-sample.git/", + "https://gitlab.example.com/abc/nodejs-devfile-sample/", + } + for _, repoUrl := range repoUrls { + out, err := getRepoOrgFromRepoUrl(repoUrl) + if err != nil || out != repoName { + t.Errorf("Failed getting '%s' from '%s': %v", repoName, repoUrl, err) + } + } + + repoName = "abc/def" + repoUrls = []string{ + "https://gitlab.example.com/abc/def/nodejs-devfile-sample", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample.git", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample.git/", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample/", + } + for _, repoUrl := 
range repoUrls { + out, err := getRepoOrgFromRepoUrl(repoUrl) + if err != nil || out != repoName { + t.Errorf("Failed getting '%s' from '%s': %v", repoName, repoUrl, err) + } + } +} + +// Test various input and output combinations for getRepoIdFromRepoUrl. +func Test_getRepoIdFromRepoUrl(t *testing.T) { + repoName := "abc/nodejs-devfile-sample" + repoUrls := []string{ + "https://github.com/abc/nodejs-devfile-sample.git/", + "https://github.com/abc/nodejs-devfile-sample.git", + "https://github.com/abc/nodejs-devfile-sample/", + "https://github.com/abc/nodejs-devfile-sample", + "https://gitlab.example.com/abc/nodejs-devfile-sample", + } + for _, repoUrl := range repoUrls { + out, err := getRepoIdFromRepoUrl(repoUrl) + if err != nil || out != repoName { + t.Errorf("Failed getting '%s' from '%s': %v", repoName, repoUrl, err) + } + } + + repoName = "abc/def/nodejs-devfile-sample" + repoUrls = []string{ + "https://gitlab.example.com/abc/def/nodejs-devfile-sample", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample.git", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample.git/", + "https://gitlab.example.com/abc/def/nodejs-devfile-sample/", + } + for _, repoUrl := range repoUrls { + out, err := getRepoIdFromRepoUrl(repoUrl) + if err != nil || out != repoName { + t.Errorf("Failed getting '%s' from '%s': %v", repoName, repoUrl, err) + } + } +} diff --git a/tests/load-tests/pkg/loadtestutils/userutils.go b/tests/load-tests/pkg/loadtestutils/userutils.go index 9b2c01450e..6a509df5c9 100644 --- a/tests/load-tests/pkg/loadtestutils/userutils.go +++ b/tests/load-tests/pkg/loadtestutils/userutils.go @@ -1,17 +1,16 @@ package loadtestutils import "encoding/json" +import "fmt" import "os" import "path/filepath" // Represents a user in the list of precreated users (e.g. 
Stage 'users.json') type User struct { - Username string `json:"username"` - Password string `json:"password"` - Token string `json:"token"` - SSOURL string `json:"ssourl"` - APIURL string `json:"apiurl"` - Verified bool `json:"verified"` + Namespace string `json:"namespace"` + Token string `json:"token"` + APIURL string `json:"apiurl"` + Verified bool `json:"verified"` } // Load 'users.json' into a slice of User structs @@ -27,5 +26,13 @@ func LoadStageUsers(filePath string) ([]User, error) { if err != nil { return nil, err } + + // Some sanity checks + if len(users) == 0 { + return nil, fmt.Errorf("Loaded %s but no users in there", filePath) + } + if users[0].APIURL == "" || users[0].Token == "" || users[0].Namespace == "" { + return nil, fmt.Errorf("Loaded %s but some expected field missing in first user", filePath) + } return users, nil } diff --git a/tests/load-tests/pkg/logging/logging.go b/tests/load-tests/pkg/logging/logging.go index eeb07cd2cb..172df41a21 100644 --- a/tests/load-tests/pkg/logging/logging.go +++ b/tests/load-tests/pkg/logging/logging.go @@ -10,7 +10,6 @@ var DEBUG = 1 var INFO = 2 var WARNING = 3 var ERROR = 4 -var FATAL = 5 var Logger = logger{} @@ -55,9 +54,8 @@ func (l *logger) Error(msg string, params ...interface{}) { } func (l *logger) Fatal(msg string, params ...interface{}) { - if l.Level <= FATAL { - klog.Fatalf("FATAL "+msg, params...) - } + MeasurementsStop() + klog.Fatalf("FATAL "+msg, params...) 
} // Log test failure with error code to CSV file so we can compile a statistic later diff --git a/tests/load-tests/pkg/logging/time_and_log.go b/tests/load-tests/pkg/logging/time_and_log.go index 9aba84f0db..7e1af7c41c 100644 --- a/tests/load-tests/pkg/logging/time_and_log.go +++ b/tests/load-tests/pkg/logging/time_and_log.go @@ -11,11 +11,13 @@ import "os" import "encoding/csv" import "sync" +import types "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/types" + var measurementsQueue chan MeasurementEntry // channel to send measurements to -var errorsQueue chan ErrorEntry // chanel to send failures to +var errorsQueue chan ErrorEntry // chanel to send failures to var measurementsOutput string // path to CSV where to save measurements -var errorsOutput string // path to CSV where to save measurements +var errorsOutput string // path to CSV where to save measurements var writerWaitGroup sync.WaitGroup @@ -23,16 +25,20 @@ var batchSize int // when we accumulate this many of records, we dump them to CS // Represents the data about measurement we want to store to CSV type MeasurementEntry struct { - Timestamp time.Time - Metric string - Duration time.Duration - Parameters string - Error error + Timestamp time.Time + PerUserId int + PerAppId int + PerCompId int + RepeatsCounter int + Metric string + Duration time.Duration + Parameters string + Error error } // Helper function to convert struct to slice of string which is needed when converting to CSV func (e *MeasurementEntry) GetSliceOfStrings() []string { - return []string{e.Timestamp.Format(time.RFC3339Nano), e.Metric, fmt.Sprintf("%f", e.Duration.Seconds()), e.Parameters, fmt.Sprintf("%v", e.Error)} + return []string{e.Timestamp.Format(time.RFC3339Nano), fmt.Sprintf("%d", e.PerUserId), fmt.Sprintf("%d", e.PerAppId), fmt.Sprintf("%d", e.PerCompId), fmt.Sprintf("%d", e.RepeatsCounter), e.Metric, fmt.Sprintf("%f", e.Duration.Seconds()), e.Parameters, fmt.Sprintf("%v", e.Error)} } // Represents the data about 
failure we want to store to CSV @@ -47,7 +53,6 @@ func (e *ErrorEntry) GetSliceOfStrings() []string { return []string{e.Timestamp.Format(time.RFC3339Nano), fmt.Sprintf("%d", e.Code), e.Message} } - // Initialize channels and start functions that are processing records func MeasurementsStart(directory string) { batchSize = 3 @@ -165,8 +170,29 @@ func errorsWriter() { // Measure duration of a given function run with given parameters and return what function returned // This only returns first (data) and last (error) returned value. Maybe this // can be generalized completely, but it is good enough for our needs. -func Measure(fn interface{}, params ...interface{}) (interface{}, error) { +func Measure(ctx interface{}, fn interface{}, params ...interface{}) (interface{}, error) { funcValue := reflect.ValueOf(fn) + perUserId := -1 + perAppId := -1 + perCompId := -1 + repeatsCounter := -1 + + // Extract additional metadata about this function call from provided context. + if casted, ok := ctx.(*types.PerUserContext); ok { + perUserId = casted.UserIndex + repeatsCounter = casted.JourneyRepeatsCounter + } + if casted, ok := ctx.(*types.PerApplicationContext); ok { + perUserId = casted.ParentContext.UserIndex + perAppId = casted.ApplicationIndex + repeatsCounter = casted.JourneyRepeatIndex + } + if casted, ok := ctx.(*types.PerComponentContext); ok { + perUserId = casted.ParentContext.ParentContext.UserIndex + perAppId = casted.ParentContext.ApplicationIndex + perCompId = casted.ComponentIndex + repeatsCounter = casted.ParentContext.JourneyRepeatIndex + } // Construct arguments for the function call numParams := len(params) @@ -182,8 +208,10 @@ func Measure(fn interface{}, params ...interface{}) (interface{}, error) { paramsStorable := make(map[string]string) for i := 0; i < numParams; i++ { x := 1 + key := fmt.Sprintf("%v", reflect.TypeOf(params[i])) value := fmt.Sprintf("%+v", reflect.ValueOf(params[i])) + for { keyFull := key + fmt.Sprint(x) if _, ok := 
paramsStorable[keyFull]; !ok { @@ -204,7 +232,7 @@ func Measure(fn interface{}, params ...interface{}) (interface{}, error) { defer func() { elapsed := time.Since(startTime) - LogMeasurement(funcName, paramsStorable, elapsed, fmt.Sprintf("%+v", resultInterValue), errInterValue) + LogMeasurement(funcName, perUserId, perAppId, perCompId, repeatsCounter, paramsStorable, elapsed, fmt.Sprintf("%+v", resultInterValue), errInterValue) }() // Call the function with provided arguments @@ -224,7 +252,7 @@ func Measure(fn interface{}, params ...interface{}) (interface{}, error) { } // Store given measurement -func LogMeasurement(metric string, params map[string]string, elapsed time.Duration, result string, err error) { +func LogMeasurement(metric string, perUserId, perAppId, perCompId, repeatsCounter int, params map[string]string, elapsed time.Duration, result string, err error) { // Extract parameter keys into a slice so we can sort them var paramsKeys []string for k := range params { @@ -245,13 +273,17 @@ func LogMeasurement(metric string, params map[string]string, elapsed time.Durati } params_string = strings.TrimLeft(params_string, " ") - Logger.Trace("Measured function: %s, Duration: %s, Params: %s, Result: %s, Error: %v\n", metric, elapsed, params_string, result, err) + Logger.Trace("Measured function: %s, Thread: %d/%d/%d, Repeat: %d, Duration: %s, Params: %s, Result: %s, Error: %v\n", metric, perUserId, perAppId, perCompId, repeatsCounter, elapsed, params_string, result, err) data := MeasurementEntry{ - Timestamp: time.Now(), - Metric: metric, - Duration: elapsed, - Parameters: params_string, - Error: err, + Timestamp: time.Now(), + Metric: metric, + PerUserId: perUserId, + PerAppId: perAppId, + PerCompId: perCompId, + RepeatsCounter: repeatsCounter, + Duration: elapsed, + Parameters: params_string, + Error: err, } measurementsQueue <- data } diff --git a/tests/load-tests/pkg/options/options.go b/tests/load-tests/pkg/options/options.go index 61b54da0de..6ae0fcae52 
100644 --- a/tests/load-tests/pkg/options/options.go +++ b/tests/load-tests/pkg/options/options.go @@ -4,38 +4,55 @@ import "encoding/json" import "fmt" import "os" import "time" +import "strings" +import "sync" // Struct to hold command line options type Opts struct { - ApplicationsCount int - BuildPipelineSelectorBundle string - ComponentContainerContext string - ComponentContainerFile string - ComponentRepoRevision string - ComponentRepoUrl string - ComponentsCount int - Concurrency int - FailFast bool - JourneyDuration string - JourneyRepeats int - JourneyUntil time.Time - LogDebug bool - LogTrace bool - LogInfo bool - OutputDir string - PipelineMintmakerDisabled bool - PipelineRepoTemplating bool - PipelineImagePullSecrets []string - Purge bool - PurgeOnly bool - QuayRepo string - Stage bool - TestScenarioGitURL string - TestScenarioPathInRepo string - TestScenarioRevision string - UsernamePrefix string - WaitIntegrationTestsPipelines bool - WaitPipelines bool + ApplicationsCount int + BuildPipelineSelectorBundle string + ComponentContainerContext string + ComponentContainerFile string + ComponentRepoRevision string + ComponentRepoUrl string + ComponentsCount int + Concurrency int + FailFast bool + ForkTarget string + JourneyDuration string + JourneyRepeats int + JourneyUntil time.Time + JourneyReuseApplications bool + JourneyReuseComponents bool + LogDebug bool + LogInfo bool + LogTrace bool + OutputDir string + PipelineImagePullSecrets []string + PipelineMintmakerDisabled bool + PipelineRepoTemplating bool + PipelineRepoTemplatingSourceDir string + PipelineRepoTemplatingSource string + Purge bool + PurgeOnly bool + QuayRepo string + ReleasePipelinePath string + ReleasePipelineRevision string + ReleasePipelineServiceAccount string + ReleasePipelineUrl string + ReleasePolicy string + RunPrefix string + SerializeComponentOnboarding bool + SerializeComponentOnboardingLock sync.Mutex + Stage bool + StartupDelay time.Duration + StartupJitter time.Duration + 
TestScenarioGitURL string + TestScenarioPathInRepo string + TestScenarioRevision string + WaitIntegrationTestsPipelines bool + WaitPipelines bool + WaitRelease bool } // Pre-process load-test options before running the test @@ -52,6 +69,45 @@ func (o *Opts) ProcessOptions() error { o.Purge = true } + // If we are templating, set default values for relevant options if empty + if o.PipelineRepoTemplating { + if o.PipelineRepoTemplatingSource == "" { + o.PipelineRepoTemplatingSource = o.ComponentRepoUrl + } + if o.PipelineRepoTemplatingSourceDir == "" { + o.PipelineRepoTemplatingSourceDir = ".template/" + } + if !strings.HasSuffix(o.PipelineRepoTemplatingSourceDir, "/") { + o.PipelineRepoTemplatingSourceDir = o.PipelineRepoTemplatingSourceDir + "/" + } + } + + // If forking target directory was empty, use MY_GITHUB_ORG env variable + if o.ForkTarget == "" { + o.ForkTarget = os.Getenv("MY_GITHUB_ORG") + if o.ForkTarget == "" { + return fmt.Errorf("Was not able to get fork target") + } + } + + // If startup delay specified, make sure jitter is not bigger than 2 * delay + if o.StartupDelay != 0 { + if o.StartupJitter > o.StartupDelay * 2 { + fmt.Print("Warning: Lowering startup jitter as it was bigger than delay\n") + o.StartupJitter = o.StartupDelay * 2 + } + } + + // If we are supposed to reuse components on additional journeys, we have to reuse applications + if o.JourneyRepeats > 1 { + if o.JourneyReuseComponents { + if ! 
o.JourneyReuseApplications { + fmt.Print("Warning: We are supposed to reuse components so will reuse applications as well\n") + o.JourneyReuseApplications = true + } + } + } + // Convert options struct to pretty JSON jsonOptions, err2 := json.MarshalIndent(o, "", " ") if err2 != nil { diff --git a/tests/load-tests/pkg/types/types.go b/tests/load-tests/pkg/types/types.go new file mode 100644 index 0000000000..f4a8f1232b --- /dev/null +++ b/tests/load-tests/pkg/types/types.go @@ -0,0 +1,50 @@ +package types + +import "sync" +import "time" + +import framework "github.com/konflux-ci/e2e-tests/pkg/framework" +import loadtestutils "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/loadtestutils" +import options "github.com/konflux-ci/e2e-tests/tests/load-tests/pkg/options" + +// Struct to hold user journey thread data +type PerUserContext struct { + PerUserWG *sync.WaitGroup + UserIndex int + StartupPause time.Duration + JourneyRepeatsCounter int + Opts *options.Opts + StageUsers *[]loadtestutils.User + Framework *framework.Framework + Username string + Namespace string + ComponentRepoUrl string // overrides same value from Opts, needed when templating repos + PerApplicationContexts []*PerApplicationContext +} + +// Struct to hold data for thread to process each application +type PerApplicationContext struct { + PerApplicationWG *sync.WaitGroup + ApplicationIndex int + JourneyRepeatIndex int + StartupPause time.Duration + Framework *framework.Framework + ParentContext *PerUserContext + ApplicationName string + IntegrationTestScenarioName string + ReleasePlanName string + ReleasePlanAdmissionName string + PerComponentContexts []*PerComponentContext +} + +// Struct to hold data for thread to process each component +type PerComponentContext struct { + PerComponentWG *sync.WaitGroup + ComponentIndex int + StartupPause time.Duration + Framework *framework.Framework + ParentContext *PerApplicationContext + ComponentName string + SnapshotName string + ReleaseName string +} 
diff --git a/tests/load-tests/run-max-concurrency.sh b/tests/load-tests/run-max-concurrency.sh index 84b9e236af..3c92480e16 100755 --- a/tests/load-tests/run-max-concurrency.sh +++ b/tests/load-tests/run-max-concurrency.sh @@ -52,7 +52,7 @@ load_test() { rm -rvf "$workdir/load-test.log" options="" - [[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && options="$options --pipeline-image-pull-secrets $PIPELINE_IMAGE_PULL_SECRETS" + [[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && for s in $PIPELINE_IMAGE_PULL_SECRETS; do options="$options --pipeline-image-pull-secrets $s"; done date -Ins --utc >started go run loadtest.go \ diff --git a/tests/load-tests/run-stage-max-concurrency.sh b/tests/load-tests/run-stage-max-concurrency.sh index 7d68bd2f16..36cc703e5c 100755 --- a/tests/load-tests/run-stage-max-concurrency.sh +++ b/tests/load-tests/run-stage-max-concurrency.sh @@ -50,7 +50,7 @@ load_test() { rm -rvf "$workdir/load-test.log" options="" - [[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && options="$options --pipeline-image-pull-secrets $PIPELINE_IMAGE_PULL_SECRETS" + [[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && for s in $PIPELINE_IMAGE_PULL_SECRETS; do options="$options --pipeline-image-pull-secrets $s"; done date -Ins --utc >started go run loadtest.go \ diff --git a/tests/load-tests/run-stage.sh b/tests/load-tests/run-stage.sh index 46f114039f..1a8b13439e 100755 --- a/tests/load-tests/run-stage.sh +++ b/tests/load-tests/run-stage.sh @@ -5,8 +5,9 @@ set -o errexit set -o pipefail options="" -[[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && options="$options --pipeline-image-pull-secrets $PIPELINE_IMAGE_PULL_SECRETS" +[[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && for s in $PIPELINE_IMAGE_PULL_SECRETS; do options="$options --pipeline-image-pull-secrets $s"; done +trap "date -Ins --utc >ended" EXIT date -Ins --utc >started go run loadtest.go \ --applications-count "${APPLICATIONS_COUNT:-1}" \ @@ -15,19 +16,27 @@ go run loadtest.go \ --component-repo-revision 
"${COMPONENT_REPO_REVISION:-main}" \ --components-count "${COMPONENTS_COUNT:-1}" \ --concurrency "${CONCURRENCY:-1}" \ + --fork-target "${FORK_TARGET:-}" \ --journey-duration "${JOURNEY_DURATION:-1h}" \ --journey-repeats "${JOURNEY_REPEATS:-1}" \ --log-"${LOGGING_LEVEL:-info}" \ --pipeline-repo-templating="${PIPELINE_REPO_TEMPLATING:-false}" \ + --pipeline-repo-templating-source="${PIPELINE_REPO_TEMPLATING_SOURCE:-}" \ + --pipeline-repo-templating-source-dir="${PIPELINE_REPO_TEMPLATING_SOURCE_DIR:-}" \ --output-dir "${OUTPUT_DIR:-.}" \ --purge="${PURGE:-true}" \ --quay-repo "${QUAY_REPO:-redhat-user-workloads-stage}" \ - --test-scenario-git-url "${TEST_SCENARIO_GIT_URL:-https://github.com/konflux-ci/integration-examples.git}" \ - --test-scenario-path-in-repo "${TEST_SCENARIO_PATH_IN_REPO:-pipelines/integration_resolver_pipeline_pass.yaml}" \ - --test-scenario-revision "${TEST_SCENARIO_REVISION:-main}" \ - --username "${USER_PREFIX:-undef}" \ + --test-scenario-git-url "${TEST_SCENARIO_GIT_URL-https://github.com/konflux-ci/integration-examples.git}" \ + --test-scenario-path-in-repo "${TEST_SCENARIO_PATH_IN_REPO-pipelines/integration_resolver_pipeline_pass.yaml}" \ + --test-scenario-revision "${TEST_SCENARIO_REVISION-main}" \ + --release-policy "${RELEASE_POLICY-}" \ + --release-pipeline-url "${RELEASE_PIPELINE_URL:-https://github.com/konflux-ci/release-service-catalog.git}" \ + --release-pipeline-revision "${RELEASE_PIPELINE_REVISION:-production}" \ + --release-pipeline-path "${RELEASE_PIPELINE_PATH:-pipelines/managed/e2e/e2e.yaml}" \ + --release-pipeline-service-account "${RELEASE_PIPELINE_SERVICE_ACCOUNT:-release-serviceaccount}" \ + --runprefix "${USER_PREFIX:-undef}" \ --waitintegrationtestspipelines="${WAIT_INTEGRATION_TESTS:-true}" \ --waitpipelines="${WAIT_PIPELINES:-true}" \ + --waitrelease="${WAIT_RELEASE:-true}" \ $options \ --stage -date -Ins --utc >ended diff --git a/tests/load-tests/run.sh b/tests/load-tests/run.sh index 6d59021f4f..428d29c75b 100755 --- 
a/tests/load-tests/run.sh +++ b/tests/load-tests/run.sh @@ -61,7 +61,7 @@ fi ## Run the actual load test options="" -[[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && options="$options --pipeline-image-pull-secrets $PIPELINE_IMAGE_PULL_SECRETS" +[[ -n "${PIPELINE_IMAGE_PULL_SECRETS:-}" ]] && for s in $PIPELINE_IMAGE_PULL_SECRETS; do options="$options --pipeline-image-pull-secrets $s"; done date -Ins --utc >started go run loadtest.go \ --applications-count "${APPLICATIONS_COUNT:-1}" \ @@ -72,16 +72,19 @@ go run loadtest.go \ --component-repo-revision "${COMPONENT_REPO_REVISION:-main}" \ --components-count "${COMPONENTS_COUNT:-1}" \ --concurrency "${CONCURRENCY:-1}" \ + --fork-target "${FORK_TARGET:-}" \ --journey-duration "${JOURNEY_DURATION:-1h}" \ --journey-repeats "${JOURNEY_REPEATS:-1}" \ --log-"${LOGGING_LEVEL:-info}" \ --pipeline-repo-templating="${PIPELINE_REPO_TEMPLATING:-false}" \ + --pipeline-repo-templating-source="${PIPELINE_REPO_TEMPLATING_SOURCE:-}" \ + --pipeline-repo-templating-source-dir="${PIPELINE_REPO_TEMPLATING_SOURCE_DIR:-}" \ --output-dir "${OUTPUT_DIR:-.}" \ --purge="${PURGE:-true}" \ --quay-repo "${QUAY_REPO:-stonesoup_perfscale}" \ - --test-scenario-git-url "${TEST_SCENARIO_GIT_URL:-https://github.com/konflux-ci/integration-examples.git}" \ - --test-scenario-path-in-repo "${TEST_SCENARIO_PATH_IN_REPO:-pipelines/integration_resolver_pipeline_pass.yaml}" \ - --test-scenario-revision "${TEST_SCENARIO_REVISION:-main}" \ + --test-scenario-git-url "${TEST_SCENARIO_GIT_URL-https://github.com/konflux-ci/integration-examples.git}" \ + --test-scenario-path-in-repo "${TEST_SCENARIO_PATH_IN_REPO-pipelines/integration_resolver_pipeline_pass.yaml}" \ + --test-scenario-revision "${TEST_SCENARIO_REVISION-main}" \ --username "$USER_PREFIX" \ --waitintegrationtestspipelines="${WAIT_INTEGRATION_TESTS:-true}" \ --waitpipelines="${WAIT_PIPELINES:-true}" \ diff --git a/tests/release/releaseLib.go b/tests/release/releaseLib.go index bb65864c9a..3b4c763845 100644 --- 
a/tests/release/releaseLib.go +++ b/tests/release/releaseLib.go @@ -30,9 +30,8 @@ func NewFramework(workspace string) *framework.Framework { var fw *framework.Framework var err error stageOptions := utils.Options{ - ToolchainApiUrl: os.Getenv(constants.TOOLCHAIN_API_URL_ENV), - KeycloakUrl: os.Getenv(constants.KEYLOAK_URL_ENV), - OfflineToken: os.Getenv(constants.OFFLINE_TOKEN_ENV), + ApiUrl: os.Getenv(constants.TOOLCHAIN_API_URL_ENV), + Token: os.Getenv(constants.OFFLINE_TOKEN_ENV), } fw, err = framework.NewFrameworkWithTimeout( @@ -78,7 +77,7 @@ func CreateComponent(devFw framework.Framework, devNamespace, appName, compName, }, }, } - component, err := devFw.AsKubeAdmin.HasController.CreateComponent(componentObj, devNamespace, "", "", appName, true, buildPipelineBundle) + component, err := devFw.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, devNamespace, "", "", appName, true, buildPipelineBundle) Expect(err).NotTo(HaveOccurred()) return component } @@ -116,7 +115,7 @@ func CreateComponentWithNewBranch(f framework.Framework, testNamespace, applicat }, } - testComponent, err := f.AsKubeAdmin.HasController.CreateComponent(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) + testComponent, err := f.AsKubeAdmin.HasController.CreateComponentCheckImageRepository(componentObj, testNamespace, "", "", applicationName, true, utils.MergeMaps(utils.MergeMaps(constants.ComponentPaCRequestAnnotation, constants.ImageControllerAnnotationRequestPublicRepo), buildPipelineAnnotation)) Expect(err).NotTo(HaveOccurred()) return testComponent, testPacBranchName, componentBaseBranchName