diff --git a/.golangci.yaml b/.golangci.yaml index 6801e08d..9c6c70dc 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,3 +1,4 @@ +version: "2" run: cache-dir: .golangci-lint-cache enable-cache: true @@ -6,6 +7,7 @@ run: tests: true build-tags: - e2e + modules-download-mode: readonly outputs: print-linter-name: true linters-settings: @@ -19,13 +21,14 @@ linters: enable: - forbidigo - misspell - - gofmt - - goimports - errcheck - - typecheck - unparam - gocritic - gosec +formatters: + enable: + - gofmt + - goimports issues: max-issues-per-linter: 0 max-same-issues: 0 diff --git a/.tekton/acceptance-tests-pr.yaml b/.tekton/acceptance-tests-pr.yaml index 5910ea8c..1be33130 100644 --- a/.tekton/acceptance-tests-pr.yaml +++ b/.tekton/acceptance-tests-pr.yaml @@ -6,7 +6,7 @@ metadata: annotations: pipelinesascode.tekton.dev/max-keep-runs: "2" pipelinesascode.tekton.dev/on-event: "[pull_request]" - pipelinesascode.tekton.dev/on-target-branch: "[*]" + pipelinesascode.tekton.dev/on-target-branch: "[master]" pipelinesascode.tekton.dev/pipeline: "https://gitlab.cee.redhat.com/tekton/plumbing/-/raw/master/ci/pipelines/acceptance-tests.yaml" pipelinesascode.tekton.dev/task-10: "https://gitlab.cee.redhat.com/tekton/plumbing/-/raw/master/ci/tasks/cli-tests-in-container.yaml" pipelinesascode.tekton.dev/task-20: "https://gitlab.cee.redhat.com/tekton/plumbing/-/raw/master/ci/tasks/configure-operator.yaml" @@ -41,6 +41,8 @@ spec: value: custom-operators - name: CLI_TESTS_TYPE value: downstream + - name: CLUSTER_LIFETIME + value: 2h - name: CLUSTER_POOL value: "openstack" - name: GIT_RELEASE_TESTS_BRANCH diff --git a/.tekton/go-lint-pipelinerun.yaml b/.tekton/go-lint-pipelinerun.yaml index a8e0bb16..3ca4127d 100644 --- a/.tekton/go-lint-pipelinerun.yaml +++ b/.tekton/go-lint-pipelinerun.yaml @@ -9,6 +9,9 @@ metadata: pipelinesascode.tekton.dev/task: "git-clone" pipelinesascode.tekton.dev/task-1: "golangci-lint" pipelinesascode.tekton.dev/max-keep-runs: "5" + # Expression to check 
for .go files inside pkg & steps directory + pipelinesascode.tekton.dev/on-cel-expression: | + "*/*/*.go".pathChanged() || "*/*.go".pathChanged() spec: params: - name: repo_url diff --git a/Dockerfile.CI b/Dockerfile.CI index 3c208174..02df841b 100644 --- a/Dockerfile.CI +++ b/Dockerfile.CI @@ -1,15 +1,19 @@ -FROM quay.io/fedora/fedora:40 +FROM quay.io/fedora/fedora:42 RUN dnf update -y &&\ - dnf install -y --setopt=tsflags=nodocs azure-cli git go jq lynx make openssl python-unversioned-command python3 python3-antlr4-runtime python3-pip unzip vim wget &&\ + dnf install -y --setopt=tsflags=nodocs azure-cli git go jq make openssl python-unversioned-command python3 python3-antlr4-runtime python3-pip skopeo unzip vim wget &&\ dnf clean all -y && rm -fR /var/cache/dnf RUN pip install pyyaml reportportal-client -RUN wget https://github.com/mikefarah/yq/releases/download/v4.44.3/yq_linux_amd64 -O /usr/bin/yq &&\ +RUN wget https://certs.corp.redhat.com/certs/Current-IT-Root-CAs.pem \ + -O /etc/pki/ca-trust/source/anchors/Current-IT-Root-CAs.pem && \ + update-ca-trust extract + +RUN wget https://github.com/mikefarah/yq/releases/download/v4.45.1/yq_linux_amd64 -O /usr/bin/yq &&\ chmod +x /usr/bin/yq -ENV OC_VERSION=4.17 +ENV OC_VERSION=4.18 RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/fast-${OC_VERSION}/openshift-client-linux.tar.gz \ -O /tmp/openshift-client.tar.gz &&\ tar xzf /tmp/openshift-client.tar.gz -C /usr/bin oc &&\ @@ -33,7 +37,7 @@ RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/rosa/latest/rosa- tar xzf /tmp/rosa.tar.gz -C /usr/bin --no-same-owner rosa &&\ rm /tmp/rosa.tar.gz -ENV TKN_VERSION=1.16.0 +ENV TKN_VERSION=1.17.0 RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/pipelines/${TKN_VERSION}/tkn-linux-amd64.tar.gz \ -O /tmp/tkn.tar.gz &&\ tar xzf /tmp/tkn.tar.gz -C /usr/bin --no-same-owner tkn tkn-pac opc &&\ @@ -42,7 +46,7 @@ RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/pipelines/${TKN_V 
RUN wget https://dl.min.io/client/mc/release/linux-amd64/mc -O /usr/bin/mc &&\ chmod u+x /usr/bin/mc -ENV GAUGE_VERSION=1.6.10 +ENV GAUGE_VERSION=1.6.14 RUN wget https://github.com/getgauge/gauge/releases/download/v${GAUGE_VERSION}/gauge-${GAUGE_VERSION}-linux.x86_64.zip \ -O /tmp/gauge.zip &&\ unzip /tmp/gauge.zip gauge -d /usr/bin &&\ @@ -51,20 +55,20 @@ RUN wget https://github.com/getgauge/gauge/releases/download/v${GAUGE_VERSION}/g gauge install go &&\ gauge install html-report &&\ gauge install xml-report &&\ - gauge install reportportal &&\ + gauge install reportportal -v 1.16.2 &&\ gauge config check_updates false &&\ gauge config runner_connection_timeout 600000 && \ gauge config runner_request_timeout 300000 &&\ go env -w GOPROXY="https://proxy.golang.org,direct" &&\ gauge version -RUN wget https://github.com/sigstore/cosign/releases/download/v2.4.1/cosign-linux-amd64 -O /usr/bin/cosign && \ +RUN wget https://github.com/sigstore/cosign/releases/download/v2.5.0/cosign-linux-amd64 -O /usr/bin/cosign && \ chmod a+x /usr/bin/cosign -RUN wget https://github.com/sigstore/rekor/releases/download/v1.3.6/rekor-cli-linux-amd64 -O /usr/bin/rekor-cli && \ +RUN wget https://github.com/sigstore/rekor/releases/download/v1.3.10/rekor-cli-linux-amd64 -O /usr/bin/rekor-cli && \ chmod u+x /usr/bin/rekor-cli -ENV GOLANGCI_LINT_VERSION=1.61.0 +ENV GOLANGCI_LINT_VERSION=2.1.5 RUN wget -O /tmp/golangci-lint.tar.gz https://github.com/golangci/golangci-lint/releases/download/v${GOLANGCI_LINT_VERSION}/golangci-lint-${GOLANGCI_LINT_VERSION}-linux-amd64.tar.gz \ && tar --strip-components=1 -C /usr/bin -xzf /tmp/golangci-lint.tar.gz golangci-lint-${GOLANGCI_LINT_VERSION}-linux-amd64/golangci-lint \ && rm -f /tmp/golangci-lint.tar.gz diff --git a/README.md b/README.md index 4a5e9069..cf0b6974 100644 --- a/README.md +++ b/README.md @@ -88,12 +88,34 @@ gauge run --log-level=debug --verbose --tags 'e2e & !skip_linux/amd64' specs/clu gauge run --log-level=debug --verbose --tags e2e 
specs/pac/pac-gitlab.spec ``` -## Running PAC GitLab Tests: -Before running PAC GitLab tests, Configure the environment variable from `/spec/pac/README.md`. +## Running PAC GitLab Tests +Pipelines as code is a project allowing you to define your CI/CD using Tekton PipelineRuns and Tasks in a file located in your source control management (SCM) system, such as GitHub or GitLab. This file is then used to automatically create a pipeline for a Pull Request or a Push to a branch. + +### Setting up PAC in GitLab + +- Create a New project in gitlab.com +- Change the visibility of the project to Public +- Change the main branch to unprotected under `Settings --> Repository --> Protected branches` +- Copy the project ID by clicking on the three dots in the project root directory and `export GITLAB_PROJECT_ID=` +- Click on your profile under `Preferences`, then go to `User Settings --> Access tokens` +- Create a New Personal Access Token and `export GITLAB_TOKEN=` +- Create a new Public Group in GitLab and copy only the Group name from the URL, e.g. from the GitLab URL `https://gitlab.com/groups/test324345` copy only the group name `test324345` and `export GITLAB_GROUP_NAMESPACE=` +- Enter any WebhookSecret to be used for the GitLab webhook `export GITLAB_WEBHOOK_TOKEN=` + +### Running PAC E2E tests +Export the following Env Variables ``` -gauge run --log-level=debug --verbose --tags e2e specs/pac/pac-gitlab.spec +export GITLAB_TOKEN= +export GITLAB_PROJECT_ID= +export GITLAB_GROUP_NAMESPACE= +export GITLAB_WEBHOOK_TOKEN= ``` +To run PAC e2e tests: + +``` +gauge run --log-level=debug --verbose --tags e2e specs/pac/pac-gitlab.spec +``` ## Authoring a new test specification 1. Create or update a spec file in `specs` directory using `Markdown` syntax.
diff --git a/env/default/default.properties b/env/default/default.properties index 74ba6cc0..11284f69 100644 --- a/env/default/default.properties +++ b/env/default/default.properties @@ -34,6 +34,9 @@ runner_connection_timeout = 3600000 # language runner returns the response message. runner_request_timeout = 3600000 +# Disable screenshot on failure error: screenshot.png not found +screenshot_on_failure = false + enable_multithreading = false CHANNEL = latest CATALOG_SOURCE = redhat-operators @@ -42,12 +45,12 @@ SUBSCRIPTION_NAME = openshift-pipelines-operator-rh CHAINS_VERSION = v0.23 HUB_VERSION = v1.19 MANUAL_APPROVAL_VERSION = v0.4 -OPERATOR_VERSION = devel -OSP_VERSION = 5.0.5 -OSP_TUTORIAL_BRANCH = master +OPERATOR_VERSION = v0.74 +OSP_VERSION = 1.17 +OSP_TUTORIAL_BRANCH = pipelines-1.17 PAC_VERSION = 0.29 PIPELINE_VERSION = v0.65 RESULTS_VERSION = v0.13 TARGET = openshift -TKN_CLIENT_VERSION = 0.32 +TKN_CLIENT_VERSION = 0.39 TRIGGERS_VERSION = v0.30 diff --git a/go.mod b/go.mod index 22574e50..c0662049 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,28 @@ module github.com/openshift-pipelines/release-tests -go 1.22.5 +go 1.22.7 -toolchain go1.22.7 +toolchain go1.23.5 require ( github.com/getgauge-contrib/gauge-go v0.4.0 github.com/google/go-cmp v0.6.0 github.com/openshift-pipelines/manual-approval-gate v0.2.2 + github.com/openshift-pipelines/pipelines-as-code v0.27.2 github.com/openshift/api v0.0.0-20240521185306-0314f31e7774 github.com/openshift/client-go v0.0.0-20240523113335-452272e0496d github.com/operator-framework/api v0.16.0 github.com/operator-framework/operator-lifecycle-manager v0.22.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.2 github.com/prometheus/common v0.55.0 github.com/tektoncd/cli v0.38.1 - github.com/tektoncd/operator v0.73.1 - github.com/tektoncd/pipeline v0.62.3 - github.com/tektoncd/triggers v0.29.1 - github.com/xanzy/go-gitlab v0.107.0 + 
github.com/tektoncd/operator v0.74.1 + github.com/tektoncd/pipeline v0.65.1 + github.com/tektoncd/triggers v0.30.0 + github.com/xanzy/go-gitlab v0.109.0 go.opencensus.io v0.24.0 + gopkg.in/yaml.v2 v2.4.0 gotest.tools/v3 v3.5.1 k8s.io/api v0.30.0 k8s.io/apimachinery v0.30.0 @@ -69,14 +71,14 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/cel-go v0.20.1 // indirect + github.com/google/cel-go v0.21.0 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -90,6 +92,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/ktr0731/go-ansisgr v0.1.0 // indirect github.com/ktr0731/go-fuzzyfinder v0.8.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect @@ -106,7 +109,6 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nsf/termbox-go v1.1.1 // indirect - github.com/openshift-pipelines/pipelines-as-code v0.27.2 // indirect 
github.com/openshift/apiserver-library-go v0.0.0-20230816171015-6bfafa975bfb // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_model v0.6.1 // indirect @@ -125,25 +127,24 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/time v0.5.0 // indirect + golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.190.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/api v0.196.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.29.8 // indirect + k8s.io/apiextensions-apiserver v0.29.10 // indirect 
k8s.io/cli-runtime v0.29.8 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect diff --git a/go.sum b/go.sum index 2bbdfff0..f19eab0e 100644 --- a/go.sum +++ b/go.sum @@ -742,8 +742,8 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= -github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -832,8 +832,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= -github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/cel-go v0.21.0 h1:cl6uW/gxN+Hy50tNYvI691+sXxioCnstFzLp2WO4GCI= +github.com/google/cel-go v0.21.0/go.mod h1:rHUlWCcBKgyEk+eV03RPdZUekPp6YcJwV0FxuUksYxc= 
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= @@ -859,6 +859,8 @@ github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-github/v31 v31.0.0 h1:JJUxlP9lFK+ziXKimTCprajMApV1ecWD4NB6CCb0plo= github.com/google/go-github/v31 v31.0.0/go.mod h1:NQPZol8/1sMoWYGN2yaALIBytu17gAWfhbweiEed3pM= +github.com/google/go-github/v61 v61.0.0 h1:VwQCBwhyE9JclCI+22/7mLB1PuU9eowCXKY5pNlu1go= +github.com/google/go-github/v61 v61.0.0/go.mod h1:0WR+KmsWX75G2EbpyGsGmradjo3IiciuI4BmdVCobQY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -930,8 +932,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4G github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= 
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b h1:wDUNC2eKiL35DbLvsDhiblTUXHxcOPwQSCzi7xpQUN4= github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b/go.mod h1:VzxiSdG6j1pi7rwGm/xYI5RbtpBgM8sARDXlvEvxlu0= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -987,8 +989,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1008,6 +1010,8 @@ github.com/ktr0731/go-ansisgr v0.1.0 h1:fbuupput8739hQbEmZn1cEKjqQFwtCCZNznnF6AN github.com/ktr0731/go-ansisgr v0.1.0/go.mod h1:G9lxwgBwH0iey0Dw5YQd7n6PmQTwTuTM/X5Sgm/UrzE= github.com/ktr0731/go-fuzzyfinder v0.8.0 h1:+yobwo9lqZZ7jd1URPdCgZXTE2U1mpIVTkQoo4roi6w= github.com/ktr0731/go-fuzzyfinder v0.8.0/go.mod h1:Bjpz5im+tppKE9Ii6UK1h+6RaX/lUvJ0ruO4LIYRkqo= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec 
h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -1147,8 +1151,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1194,8 +1198,8 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/sigstore/sigstore v1.8.8 h1:B6ZQPBKK7Z7tO3bjLNnlCMG+H66tO4E/+qAphX8T/hg= -github.com/sigstore/sigstore 
v1.8.8/go.mod h1:GW0GgJSCTBJY3fUOuGDHeFWcD++c4G8Y9K015pwcpDI= +github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= +github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -1232,12 +1236,12 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/tektoncd/cli v0.38.1 h1:dp3k1Ad6ao5HcDR6LHdIA3KgOgztu4deJpil3iDl76U= github.com/tektoncd/cli v0.38.1/go.mod h1:5RELvDGAhFxntKnCVZbEOJ+a1bEIVTPNivg1gcdfxKo= -github.com/tektoncd/operator v0.73.1 h1:Bra3Ao2hm1+WwuLsrNLfnu8f/eNQLh1E1/ffKKImToY= -github.com/tektoncd/operator v0.73.1/go.mod h1:1kQICwpfWNbKPiSVXHV3V043VtXkW4jNmtByd+ohlgs= -github.com/tektoncd/pipeline v0.62.3 h1:hR6UKjwzChW+MNG41yjfTKiVW9xet8jbJS59tsIY7bc= -github.com/tektoncd/pipeline v0.62.3/go.mod h1:cYPH4n3X8t39arNMhgyU7swyv3hVeWToz1yYDRzTLT8= -github.com/tektoncd/triggers v0.29.1 h1:UXqjJICaRsWYb0qkIYOUlqaDR5te9Zmfrz93+TXy3ug= -github.com/tektoncd/triggers v0.29.1/go.mod h1:yVNxCSlYw//uKoXDi4kzzwYGkK2KIYLt6FwwSTz0aj8= +github.com/tektoncd/operator v0.74.1 h1:LP8NUjwbJX0XdVuohSRX6nEvHsClZkZFbfmyVWoeuf8= +github.com/tektoncd/operator v0.74.1/go.mod h1:M3HSLCnQv9GrgBdLyYURzFd6vYAGrn3v/ReLrs3gxrA= +github.com/tektoncd/pipeline v0.65.1 h1:7Ee/nqG+QWE25NGzwKZdFE0p5COb/aljfDysUFv8+0o= +github.com/tektoncd/pipeline v0.65.1/go.mod h1:V3cyfxxc7b3GLT2a13GX2mWA86qmxWhh4mOp4gfFQwQ= +github.com/tektoncd/triggers v0.30.0 h1:1RV3yxRlEN565qHYG8vIKyfrU3QVZkPuv67qurLeSYg= +github.com/tektoncd/triggers v0.30.0/go.mod h1:YkhGaFuL+z4aErBHz66di1dwuDjowmryTq6OAfQvpus= github.com/tidwall/gjson 
v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -1252,10 +1256,8 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4= -github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= -github.com/xanzy/go-gitlab v0.107.0 h1:P2CT9Uy9yN9lJo3FLxpMZ4xj6uWcpnigXsjvqJ6nd2Y= -github.com/xanzy/go-gitlab v0.107.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= +github.com/xanzy/go-gitlab v0.109.0 h1:RcRme5w8VpLXTSTTMZdVoQWY37qTJWg+gwdQl4aAttE= +github.com/xanzy/go-gitlab v0.109.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1311,8 +1313,8 @@ golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto 
v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1328,8 +1330,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= -golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1377,8 +1379,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod 
v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1454,8 +1456,8 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1487,8 +1489,8 @@ golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1508,8 +1510,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1616,8 +1618,8 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1636,8 +1638,8 @@ golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1658,16 +1660,16 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= 
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1825,8 +1827,8 @@ google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjY google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= -google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= +google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= 
+google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1974,15 +1976,15 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go. google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod 
h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2024,8 +2026,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/pkg/clients/clients.go b/pkg/clients/clients.go index 04c67b2d..ec3d8856 100644 --- a/pkg/clients/clients.go +++ 
b/pkg/clients/clients.go @@ -76,17 +76,17 @@ func NewClients(configPath string, clusterName, namespace string) (*Clients, err clients.Dynamic, err = dynamic.NewForConfig(clients.KubeConfig) if err != nil { - return nil, fmt.Errorf("Failed to create dynamic clients from config file at %s: %s", configPath, err) + return nil, fmt.Errorf("failed to create dynamic clients from config file at %s: %s", configPath, err) } clients.Operator, err = newTektonOperatorAlphaClients(clients.KubeConfig) if err != nil { - return nil, fmt.Errorf("Failed to create Operator v1alpha1 clients from config file at %s: %s", configPath, err) + return nil, fmt.Errorf("failed to create Operator v1alpha1 clients from config file at %s: %s", configPath, err) } clients.OLM, err = olmversioned.NewForConfig(clients.KubeConfig) if err != nil { - return nil, fmt.Errorf("Failed to create olm clients from config file at %s: %s", configPath, err) + return nil, fmt.Errorf("failed to create olm clients from config file at %s: %s", configPath, err) } clients.Tekton, err = pversioned.NewForConfig(clients.KubeConfig) @@ -95,17 +95,17 @@ func NewClients(configPath string, clusterName, namespace string) (*Clients, err } if err != nil { - return nil, fmt.Errorf("Failed to create resource clientset from config file at %s: %s", configPath, err) + return nil, fmt.Errorf("failed to create resource clientset from config file at %s: %s", configPath, err) } clients.TriggersClient, err = triggersclientset.NewForConfig(clients.KubeConfig) if err != nil { - return nil, fmt.Errorf("Failed to create triggers clientset from config file at %s: %s", configPath, err) + return nil, fmt.Errorf("failed to create triggers clientset from config file at %s: %s", configPath, err) } clients.PacClientset, err = pacclientset.NewForConfig(clients.KubeConfig) if err != nil { - return nil, fmt.Errorf("Failed to create pac clientset from config file at %s: %s", configPath, err) + return nil, fmt.Errorf("failed to create pac clientset from 
config file at %s: %s", configPath, err) } clients.NewClientSet(namespace) return clients, nil diff --git a/pkg/cmd/cmd.go b/pkg/cmd/cmd.go index 0cd01399..e3f521a6 100644 --- a/pkg/cmd/cmd.go +++ b/pkg/cmd/cmd.go @@ -23,11 +23,11 @@ type testsuitAdaptor struct{} var _ assert.TestingT = (*testsuitAdaptor)(nil) func (ta testsuitAdaptor) Fail() { - testsuit.T.Fail(fmt.Errorf("Step failed execute")) + testsuit.T.Fail(fmt.Errorf("step failed execute")) } func (ta testsuitAdaptor) FailNow() { - testsuit.T.Fail(fmt.Errorf("Step failed to execute")) + testsuit.T.Fail(fmt.Errorf("step failed to execute")) } func (ta testsuitAdaptor) Log(args ...interface{}) { diff --git a/pkg/config/config.go b/pkg/config/config.go index b21509d8..c0a4dffa 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -23,7 +23,7 @@ const ( // when waiting for a specific condition to be true. APITimeout = time.Minute * 10 // CLITimeout defines the amount of maximum execution time for CLI commands - CLITimeout = time.Second * 40 + CLITimeout = time.Second * 90 // ConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long. 
ConsistentlyDuration = 30 * time.Second @@ -72,9 +72,6 @@ const ( // Name of console deployment ConsolePluginDeployment = "pipelines-console-plugin" - // Community Clustertasks - CommunityClustertasks = "jib-maven,helm-upgrade-from-source,helm-upgrade-from-repo,trigger-jenkins-job,git-cli,pull-request,kubeconfig-creator,argocd-task-sync-and-wait" - // A token used in triggers tests TriggersSecretToken = "1234567" ) @@ -222,7 +219,7 @@ func Read(path string) ([]byte, error) { func TempDir() (string, error) { tmp := filepath.Join(Dir(), "..", "tmp") if _, err := os.Stat(tmp); os.IsNotExist(err) { - err := os.Mkdir(tmp, 0755) + err := os.Mkdir(tmp, 0750) return tmp, err } return tmp, nil diff --git a/pkg/k8s/k8s.go b/pkg/k8s/k8s.go index fdf0add6..936e86c1 100644 --- a/pkg/k8s/k8s.go +++ b/pkg/k8s/k8s.go @@ -43,6 +43,7 @@ func NewClientSet() (*clients.Clients, string, func()) { } oc.CreateNewProject(ns) + return cs, ns, func() { oc.DeleteProjectIgnoreErors(ns) } @@ -399,7 +400,7 @@ func AssertCronjobPresent(c *clients.Clients, cronJobName, namespace string) { return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: cronjob with prefix %v present in namespace %v, Actual: cronjob with prefix %v not present in namespace %v", cronJobName, namespace, cronJobName, namespace)) + testsuit.T.Fail(fmt.Errorf("expected: cronjob with prefix %v present in namespace %v, Actual: cronjob with prefix %v not present in namespace %v", cronJobName, namespace, cronJobName, namespace)) } log.Printf("Cronjob with prefix %v is present in namespace %v", cronJobName, namespace) } @@ -419,7 +420,7 @@ func AssertCronjobNotPresent(c *clients.Clients, cronJobName, namespace string) return true, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: cronjob with prefix %v present in namespace %v, Actual: cronjob with prefix %v not present in namespace %v", cronJobName, namespace, cronJobName, namespace)) + testsuit.T.Fail(fmt.Errorf("expected: cronjob with prefix %v 
present in namespace %v, Actual: cronjob with prefix %v not present in namespace %v", cronJobName, namespace, cronJobName, namespace)) } log.Printf("Cronjob with prefix %v is present in namespace %v", cronJobName, namespace) } @@ -428,7 +429,7 @@ func ValidateTektonInstallersetStatus(c *clients.Clients) { tis, err := c.Operator.TektonInstallerSets().List(c.Ctx, metav1.ListOptions{}) failedInstallersets := make([]string, 0) if err != nil { - testsuit.T.Fail(fmt.Errorf("Error getting tektoninstallersets: %v", err)) + testsuit.T.Fail(fmt.Errorf("error getting tektoninstallersets: %v", err)) } for _, is := range tis.Items { @@ -439,7 +440,7 @@ func ValidateTektonInstallersetStatus(c *clients.Clients) { } if len(failedInstallersets) > 0 { - testsuit.T.Fail(fmt.Errorf("The installersets %s is/are not in ready status", strings.Join(failedInstallersets, ","))) + testsuit.T.Fail(fmt.Errorf("the installersets %s is/are not in ready status", strings.Join(failedInstallersets, ","))) } log.Print("All the installersets are in ready state") } @@ -447,7 +448,7 @@ func ValidateTektonInstallersetStatus(c *clients.Clients) { func ValidateTektonInstallersetNames(c *clients.Clients) { tis, err := c.Operator.TektonInstallerSets().List(c.Ctx, metav1.ListOptions{}) if err != nil { - testsuit.T.Fail(fmt.Errorf("Error getting tektoninstallersets: %v", err)) + testsuit.T.Fail(fmt.Errorf("error getting tektoninstallersets: %v", err)) } missingInstallersets := make([]string, 0) for _, isp := range config.TektonInstallersetNamePrefixes { @@ -478,7 +479,7 @@ func ValidateTektonInstallersetNames(c *clients.Clients) { } if len(missingInstallersets) > 0 { - testsuit.T.Fail(fmt.Errorf("Installersets with prefix %s is not found", strings.Join(missingInstallersets, ","))) + testsuit.T.Fail(fmt.Errorf("installersets with prefix %s is not found", strings.Join(missingInstallersets, ","))) } } diff --git a/pkg/oc/oc.go b/pkg/oc/oc.go index 4743817a..8f7f6673 100644 --- a/pkg/oc/oc.go +++ b/pkg/oc/oc.go @@ 
-5,17 +5,17 @@ import ( "log" "slices" "strings" + "time" "github.com/getgauge-contrib/gauge-go/testsuit" "github.com/openshift-pipelines/release-tests/pkg/cmd" "github.com/openshift-pipelines/release-tests/pkg/config" - resource "github.com/openshift-pipelines/release-tests/pkg/config" "github.com/openshift-pipelines/release-tests/pkg/store" ) // Create resources using oc command func Create(path_dir, namespace string) { - log.Printf("output: %s\n", cmd.MustSucceed("oc", "create", "-f", resource.Path(path_dir), "-n", namespace).Stdout()) + log.Printf("output: %s\n", cmd.MustSucceed("oc", "create", "-f", config.Path(path_dir), "-n", namespace).Stdout()) } // Create resources using remote path using oc command @@ -24,12 +24,15 @@ func CreateRemote(remote_path, namespace string) { } func Apply(path_dir, namespace string) { - log.Printf("output: %s\n", cmd.MustSucceed("oc", "apply", "-f", resource.Path(path_dir), "-n", namespace).Stdout()) + log.Printf("output: %s\n", cmd.MustSucceed("oc", "apply", "-f", config.Path(path_dir), "-n", namespace).Stdout()) } // Delete resources using oc command func Delete(path_dir, namespace string) { - log.Printf("output: %s\n", cmd.MustSucceed("oc", "delete", "-f", resource.Path(path_dir), "-n", namespace).Stdout()) + // Tekton Results sets a finalizer that prevent resource removal for some time + // see parameters "store_deadline" and "forward_buffer" + // by default, it waits at least 150 seconds + log.Printf("output: %s\n", cmd.MustSuccedIncreasedTimeout(time.Second*300, "oc", "delete", "-f", config.Path(path_dir), "-n", namespace).Stdout()) } // CreateNewProject Helps you to create new project @@ -100,7 +103,10 @@ func LabelNamespace(namespace, label string) { } func DeleteResource(resourceType, name string) { - log.Printf("output: %s\n", cmd.MustSucceed("oc", "delete", resourceType, name, "-n", store.Namespace()).Stdout()) + // Tekton Results sets a finalizer that prevent resource removal for some time + // see parameters 
"store_deadline" and "forward_buffer" + // by default, it waits at least 150 seconds + log.Printf("output: %s\n", cmd.MustSuccedIncreasedTimeout(time.Second*300, "oc", "delete", resourceType, name, "-n", store.Namespace()).Stdout()) } func DeleteResourceInNamespace(resourceType, name, namespace string) { diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 0d080079..e235e46c 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -84,7 +84,7 @@ func ValidateHubDeployments(cs *clients.Clients, rnames utils.ResourceNames) { func ValidateManualApprovalGateDeployments(cs *clients.Clients, rnames utils.ResourceNames) { if _, err := approvalgate.EnsureManualApprovalGateExists(cs.ManualApprovalGate(), rnames); err != nil { - testsuit.T.Fail(fmt.Errorf("Manual approval gate doesn't exists\n %v", err)) + testsuit.T.Fail(fmt.Errorf("manual approval gate doesn't exists\n %v", err)) } k8s.ValidateDeployments(cs, rnames.TargetNamespace, config.MAGController, config.MAGWebHook) diff --git a/pkg/operator/rbac.go b/pkg/operator/rbac.go index f6f97bf2..2b6cb7e1 100644 --- a/pkg/operator/rbac.go +++ b/pkg/operator/rbac.go @@ -28,7 +28,7 @@ func AssertServiceAccountPresent(clients *clients.Clients, ns, targetSA string) return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: Service account %v present in the namespace %v, Actual: Service account %v not present in the namespace %v, Error: %v", targetSA, ns, targetSA, ns, err)) + testsuit.T.Fail(fmt.Errorf("expected: Service account %v present in the namespace %v, Actual: Service account %v not present in the namespace %v, Error: %v", targetSA, ns, targetSA, ns, err)) } } func AssertRoleBindingPresent(clients *clients.Clients, ns, roleBindingName string) { @@ -46,7 +46,7 @@ func AssertRoleBindingPresent(clients *clients.Clients, ns, roleBindingName stri return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: Rolebinding %v present in the namespace %v, Actual: 
Rolebinding %v not present in the namespace %v, Error: %v", roleBindingName, ns, roleBindingName, ns, err)) + testsuit.T.Fail(fmt.Errorf("expected: Rolebinding %v present in the namespace %v, Actual: Rolebinding %v not present in the namespace %v, Error: %v", roleBindingName, ns, roleBindingName, ns, err)) } } @@ -65,7 +65,7 @@ func AssertConfigMapPresent(clients *clients.Clients, ns, configMapName string) return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: Configmap %v present in the namespace %v, Actual: Configmap %v not present in the namespace %v, Error: %v", configMapName, ns, configMapName, ns, err)) + testsuit.T.Fail(fmt.Errorf("expected: Configmap %v present in the namespace %v, Actual: Configmap %v not present in the namespace %v, Error: %v", configMapName, ns, configMapName, ns, err)) } } @@ -84,7 +84,7 @@ func AssertClusterRolePresent(clients *clients.Clients, clusterRoleName string) return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: Clusterrole %v present, Actual: Clusterrole %v not present, Error: %v", clusterRoleName, clusterRoleName, err)) + testsuit.T.Fail(fmt.Errorf("expected: Clusterrole %v present, Actual: Clusterrole %v not present, Error: %v", clusterRoleName, clusterRoleName, err)) } } @@ -103,7 +103,7 @@ func AssertServiceAccountNotPresent(clients *clients.Clients, ns, targetSA strin return true, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: Service account %v not present in the namespace %v, Actual: Service account %v is present in the namespace %v, Error: %v", targetSA, ns, targetSA, ns, err)) + testsuit.T.Fail(fmt.Errorf("expected: Service account %v not present in the namespace %v, Actual: Service account %v is present in the namespace %v, Error: %v", targetSA, ns, targetSA, ns, err)) } } @@ -122,7 +122,7 @@ func AssertRoleBindingNotPresent(clients *clients.Clients, ns, roleBindingName s return true, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: 
Rolebinding %v not present in the namespace %v, Actual: Rolebinding %v present in the namespace %v, Error: %v", roleBindingName, ns, roleBindingName, ns, err)) + testsuit.T.Fail(fmt.Errorf("expected: Rolebinding %v not present in the namespace %v, Actual: Rolebinding %v present in the namespace %v, Error: %v", roleBindingName, ns, roleBindingName, ns, err)) } } @@ -141,7 +141,7 @@ func AssertConfigMapNotPresent(clients *clients.Clients, ns, configMapName strin return true, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: Configmap %v not present in the namespace %v, Expected: Configmap %v present in the namespace %v, Error: %v", configMapName, ns, configMapName, ns, err)) + testsuit.T.Fail(fmt.Errorf("expected: Configmap %v not present in the namespace %v, Expected: Configmap %v present in the namespace %v, Error: %v", configMapName, ns, configMapName, ns, err)) } } @@ -160,7 +160,7 @@ func AssertClusterRoleNotPresent(clients *clients.Clients, clusterRoleName strin return true, err }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected, Clusterrole %v not present, Actual: Clusterrole %v present, Error: %v", clusterRoleName, clusterRoleName, err)) + testsuit.T.Fail(fmt.Errorf("expected, Clusterrole %v not present, Actual: Clusterrole %v present, Error: %v", clusterRoleName, clusterRoleName, err)) } } @@ -180,7 +180,7 @@ func AssertSCCPresent(clients *clients.Clients, sccName string) { return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: security context constraint %q present, Actual: security context constraint %q not present , Error: %v", sccName, sccName, err)) + testsuit.T.Fail(fmt.Errorf("expected: security context constraint %q present, Actual: security context constraint %q not present , Error: %v", sccName, sccName, err)) } } @@ -200,6 +200,6 @@ func AssertSCCNotPresent(clients *clients.Clients, sccName string) { return true, err }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Expected: security context constraint %q 
not present, Actual: security context constraint %q present, Error: %v", sccName, sccName, err)) + testsuit.T.Fail(fmt.Errorf("expected: security context constraint %q not present, Actual: security context constraint %q present, Error: %v", sccName, sccName, err)) } } diff --git a/pkg/operator/tektonaddons.go b/pkg/operator/tektonaddons.go index aafd7563..160a7c3d 100644 --- a/pkg/operator/tektonaddons.go +++ b/pkg/operator/tektonaddons.go @@ -21,9 +21,12 @@ import ( "errors" "fmt" "log" + "os" + "strings" "github.com/getgauge-contrib/gauge-go/testsuit" "github.com/openshift-pipelines/release-tests/pkg/clients" + "github.com/openshift-pipelines/release-tests/pkg/cmd" "github.com/openshift-pipelines/release-tests/pkg/config" "knative.dev/pkg/test/logging" @@ -124,7 +127,7 @@ func TektonAddonCRDelete(clients *clients.Clients, crNames utils.ResourceNames) return false, err }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Timed out waiting on TektonAddon to delete, Error: %v", err)) + testsuit.T.Fail(fmt.Errorf("timed out waiting on TektonAddon to delete, Error: %v", err)) } err = verifyNoTektonAddonCR(clients) @@ -139,7 +142,66 @@ func verifyNoTektonAddonCR(clients *clients.Clients) error { return err } if len(addons.Items) > 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any TektonAddon exists") + return errors.New("unable to verify cluster-scoped resources are deleted if any TektonAddon exists") } return nil } + +// VerifyVersionedTasks checks if the required tasks are available with the expected version +func VerifyVersionedTasks() { + taskList := cmd.MustSucceed("oc", "get", "task", "-n", "openshift-pipelines").Stdout() + requiredTasks := []string{"buildah", "git-cli", "git-clone", "maven", "openshift-client", "s2i-dotnet", "s2i-go", "s2i-java", "s2i-nodejs", "s2i-perl", "s2i-php", "s2i-python", "s2i-ruby", "skopeo-copy", "tkn"} + expectedVersion := os.Getenv("OSP_VERSION") + + // Get the arch of the cluster as kn and Kn-apply 
task are not available on arm64 cluster + if config.Flags.ClusterArch != "arm64" { + requiredTasks = append(requiredTasks, "kn", "kn-apply") + } + + if expectedVersion == "" { + testsuit.T.Errorf("OSP_VERSION is not set. Cannot determine the required version for tasks.") + return + } + + // Remove z-stream version from OSP_VERSION + versionParts := strings.Split(expectedVersion, ".") + if len(versionParts) < 2 { + testsuit.T.Errorf("Invalid OSP_VERSION Version: %s", expectedVersion) + return + } + requiredVersion := versionParts[0] + "-" + versionParts[1] + "-0" + + for _, task := range requiredTasks { + taskWithVersion := task + "-" + requiredVersion + if !strings.Contains(taskList, taskWithVersion) { + testsuit.T.Errorf("Task %s not found in namespace openshift-pipelines", taskWithVersion) + } + } +} + +// VerifyVersionedStepActions checks if the required actions are available with the expected version +func VerifyVersionedStepActions() { + stepActionList := cmd.MustSucceed("oc", "get", "stepaction", "-n", "openshift-pipelines").Stdout() + requiredStepActions := []string{"git-clone"} + expectedVersion := os.Getenv("OSP_VERSION") + + if expectedVersion == "" { + testsuit.T.Errorf("OSP_VERSION is not set. 
Cannot determine the required version for tasks.") + return + } + + // Remove z-stream version from OSP_VERSION + versionParts := strings.Split(expectedVersion, ".") + if len(versionParts) < 2 { + testsuit.T.Errorf("Invalid OSP_VERSION Version: %s", expectedVersion) + return + } + requiredVersion := versionParts[0] + "-" + versionParts[1] + "-0" + + for _, stepAction := range requiredStepActions { + stepActionWithVersion := stepAction + "-" + requiredVersion + if !strings.Contains(stepActionList, stepActionWithVersion) { + testsuit.T.Errorf("Step action %s not found in namespace openshift-pipelines", stepActionWithVersion) + } + } +} diff --git a/pkg/operator/tektonchains.go b/pkg/operator/tektonchains.go index 67fb2968..0e2708ea 100644 --- a/pkg/operator/tektonchains.go +++ b/pkg/operator/tektonchains.go @@ -30,7 +30,6 @@ import ( "github.com/getgauge-contrib/gauge-go/testsuit" "github.com/openshift-pipelines/release-tests/pkg/cmd" "github.com/openshift-pipelines/release-tests/pkg/config" - resource "github.com/openshift-pipelines/release-tests/pkg/config" "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" chainv1alpha "github.com/tektoncd/operator/pkg/client/clientset/versioned/typed/operator/v1alpha1" "github.com/tektoncd/operator/test/utils" @@ -41,7 +40,7 @@ import ( // "quay.io/openshift-pipeline/chainstest" var repo string = os.Getenv("CHAINS_REPOSITORY") -var publicKeyPath = resource.Path("testdata/chains/key") +var publicKeyPath = config.Path("testdata/chains/key") func EnsureTektonChainsExists(clients chainv1alpha.TektonChainInterface, names utils.ResourceNames) (*v1alpha1.TektonChain, error) { ks, err := clients.Get(context.TODO(), names.TektonChain, metav1.GetOptions{}) @@ -77,7 +76,7 @@ func VerifySignature(resourceType string) { testsuit.T.Errorf("Annotation chains.tekton.dev/signed is set to %s", isSigned) } if len(signature) == 0 { - testsuit.T.Fail(fmt.Errorf("Annotation chains.tekton.dev/signature-%s-%s is not set", resourceType, 
resourceUID)) + testsuit.T.Fail(fmt.Errorf("annotation chains.tekton.dev/signature-%s-%s is not set", resourceType, resourceUID)) } // Decode the signature @@ -90,6 +89,7 @@ func VerifySignature(resourceType string) { if err != nil { testsuit.T.Errorf("Error creating file") } + //nolint:errcheck defer file.Close() _, err = file.WriteString(string(decodedSignature)) if err != nil { @@ -100,7 +100,7 @@ func VerifySignature(resourceType string) { } func StartKanikoTask() { - var tag string = time.Now().Format("060102150405") + var tag = time.Now().Format("060102150405") cmd.MustSucceed("oc", "secrets", "link", "pipeline", "chains-image-registry-credentials", "--for=pull,mount") image := fmt.Sprintf("IMAGE=%s:%s", repo, tag) cmd.MustSucceed("tkn", "task", "start", "--param", image, "--use-param-defaults", "--workspace", "name=source,claimName=chains-pvc", "--workspace", "name=dockerconfig,secret=chains-image-registry-credentials", "kaniko-chains") @@ -176,11 +176,12 @@ func CreateFileWithCosignPubKey() { if err != nil { testsuit.T.Errorf("Error decoding base64") } - filepath := filepath.Join(publicKeyPath, "cosign.pub") - file, err := os.Create(filepath) + fullPath := filepath.Join(publicKeyPath, "cosign.pub") + file, err := os.Create(filepath.Clean(fullPath)) if err != nil { testsuit.T.Errorf("Error creating file") } + //nolint:errcheck defer file.Close() _, err = file.WriteString(string(decodedPublicKey)) if err != nil { @@ -196,7 +197,10 @@ func CreateSigningSecretForTektonChains() { chainsPassword = os.Getenv("COSIGN_PASSWORD") cmd.MustSucceed("oc", "create", "secret", "generic", "signing-secrets", "--from-literal=cosign.key="+chainsPrivateKey, "--from-literal=cosign.password="+chainsPassword, "--from-literal=cosign.pub="+chainsPublicKey, "--namespace", "openshift-pipelines") } else { - os.Setenv("COSIGN_PASSWORD", "chainstest") + err := os.Setenv("COSIGN_PASSWORD", "chainstest") + if err != nil { + testsuit.T.Errorf("Error setting environment variable 
COSIGN_PASSWORD") + } cmd.MustSucceed("cosign", "generate-key-pair", "k8s://openshift-pipelines/signing-secrets") } } diff --git a/pkg/operator/tektonconfig.go b/pkg/operator/tektonconfig.go index 55bb729f..9f6d4b1d 100644 --- a/pkg/operator/tektonconfig.go +++ b/pkg/operator/tektonconfig.go @@ -123,7 +123,7 @@ func TektonConfigCRDelete(clients *clients.Clients, crNames utils.ResourceNames) return false, err }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Timed out waiting on TektonConfigCR to delete, Error: %v", err)) + testsuit.T.Fail(fmt.Errorf("timed out waiting on TektonConfigCR to delete, Error: %v", err)) } err = verifyNoTektonConfigCR(clients) if err != nil { @@ -137,7 +137,7 @@ func verifyNoTektonConfigCR(clients *clients.Clients) error { return err } if len(configs.Items) > 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any TektonConfig exists") + return errors.New("unable to verify cluster-scoped resources are deleted if any TektonConfig exists") } return nil } diff --git a/pkg/operator/tektonpipelines.go b/pkg/operator/tektonpipelines.go index b22b1e00..33c38faa 100644 --- a/pkg/operator/tektonpipelines.go +++ b/pkg/operator/tektonpipelines.go @@ -104,7 +104,7 @@ func TektonPipelineCRDelete(clients *clients.Clients, crNames utils.ResourceName return false, err }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Timed out waiting on TektonPipeline to delete, Error: %v", err)) + testsuit.T.Fail(fmt.Errorf("timed out waiting on TektonPipeline to delete, Error: %v", err)) } if err := verifyNoTektonPipelineCR(clients); err != nil { testsuit.T.Fail(err) @@ -117,7 +117,7 @@ func verifyNoTektonPipelineCR(clients *clients.Clients) error { return err } if len(pipelines.Items) > 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any TektonPipeline exists") + return errors.New("unable to verify cluster-scoped resources are deleted if any TektonPipeline exists") } return nil } diff --git 
a/pkg/operator/tektonresults.go b/pkg/operator/tektonresults.go index 3ed185a6..6b1f474e 100644 --- a/pkg/operator/tektonresults.go +++ b/pkg/operator/tektonresults.go @@ -1,8 +1,9 @@ package operator import ( + "context" "fmt" - "strconv" + "log" "strings" "time" @@ -10,11 +11,16 @@ import ( "encoding/json" "github.com/getgauge-contrib/gauge-go/testsuit" + "github.com/openshift-pipelines/release-tests/pkg/clients" "github.com/openshift-pipelines/release-tests/pkg/cmd" + "github.com/openshift-pipelines/release-tests/pkg/config" + "github.com/openshift-pipelines/release-tests/pkg/store" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" ) func CreateSecretsForTektonResults() { - var password string = cmd.MustSucceed("openssl", "rand", "-base64", "20").Stdout() + var password = cmd.MustSucceed("openssl", "rand", "-base64", "20").Stdout() password = strings.ReplaceAll(password, "\n", "") cmd.MustSucceed("oc", "create", "secret", "-n", "openshift-pipelines", "generic", "tekton-results-postgres", "--from-literal=POSTGRES_USER=result", "--from-literal=POSTGRES_PASSWORD="+password) // generating tls certificate @@ -32,35 +38,63 @@ func CreateResultsRoute() { } func GetResultsApi() string { - var results_api string = cmd.MustSucceed("oc", "get", "route", "tekton-results-api-service", "-n", "openshift-pipelines", "--no-headers", "-o", "custom-columns=:spec.host").Stdout() + ":443" + var results_api = cmd.MustSucceed("oc", "get", "route", "tekton-results-api-service", "-n", "openshift-pipelines", "--no-headers", "-o", "custom-columns=:spec.host").Stdout() + ":443" results_api = strings.ReplaceAll(results_api, "\n", "") return results_api } func GetResultsAnnotations(resourceType string) (string, string, string) { - var result_uuid string = cmd.MustSucceed("tkn", resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.annotations.results\\.tekton\\.dev/result}'").Stdout() - var record_uuid string = cmd.MustSucceed("tkn", 
resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.annotations.results\\.tekton\\.dev/record}'").Stdout() - var stored string = cmd.MustSucceed("tkn", resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.annotations.results\\.tekton\\.dev/stored}'").Stdout() + var result_uuid = cmd.MustSucceed("tkn", resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.annotations.results\\.tekton\\.dev/result}'").Stdout() + var record_uuid = cmd.MustSucceed("tkn", resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.annotations.results\\.tekton\\.dev/record}'").Stdout() + var stored = cmd.MustSucceed("tkn", resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.annotations.results\\.tekton\\.dev/stored}'").Stdout() record_uuid = strings.ReplaceAll(record_uuid, "'", "") result_uuid = strings.ReplaceAll(result_uuid, "'", "") stored = strings.ReplaceAll(stored, "'", "") return result_uuid, record_uuid, stored } -func VerifyResultsStored(resourceType string) { - _, _, storedAnnotation := GetResultsAnnotations(resourceType) - - if storedAnnotation == "" { - testsuit.T.Fail(fmt.Errorf("Annotation results.tekton.dev/stored is not set")) +func getRunsAnnotations(cs *clients.Clients, resourceType, name string) (map[string]string, error) { + switch resourceType { + case "taskrun": + taskRun, err := cs.TaskRunClient.Get(cs.Ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return taskRun.GetAnnotations(), nil + case "pipelinerun": + pipelineRuns, err := cs.PipelineRunClient.Get(cs.Ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return pipelineRuns.GetAnnotations(), nil + default: + return nil, fmt.Errorf("invalid resource type: %s", resourceType) } +} - stored, err := strconv.ParseBool(storedAnnotation) +func VerifyResultsAnnotationStored(resourceType string) { + resourceName := cmd.MustSucceed("tkn", resourceType, "describe", "--last", "-o", "jsonpath='{.metadata.name}'").Stdout() + 
resourceName = strings.ReplaceAll(resourceName, "'", "") + cs := store.Clients() + + log.Printf("Waiting for annotation 'results.tekton.dev/stored' to be true \n") + err := wait.PollUntilContextTimeout(cs.Ctx, config.APIRetry, config.APITimeout, true, func(context.Context) (done bool, err error) { + annotations, err := getRunsAnnotations(cs, resourceType, resourceName) + if err != nil { + return false, err + } + if annotations == nil || annotations["results.tekton.dev/stored"] == "" { + log.Printf("Annotation 'results.tekton.dev/stored' is not set yet\n") + return false, nil + } + if annotations["results.tekton.dev/stored"] == "true" { + return true, nil + } + return false, nil + }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Annotation results.tekton.dev/record doesn't contain a boolean value")) - } - if !stored { - testsuit.T.Fail(fmt.Errorf("Annotation results.tekton.dev/record is set to false")) + testsuit.T.Fail(fmt.Errorf("annotation 'results.tekton.dev/stored' is not true: %v", err)) } } @@ -71,10 +105,10 @@ func VerifyResultsLogs(resourceType string) { results_api = GetResultsApi() if record_uuid == "" { - testsuit.T.Fail(fmt.Errorf("Annotation results.tekton.dev/record is not set")) + testsuit.T.Fail(fmt.Errorf("annotation results.tekton.dev/record is not set")) } - var resultsJsonData string = cmd.MustSucceed("opc", "results", "logs", "get", "--insecure", "--addr", results_api, record_uuid).Stdout() + var resultsJsonData = cmd.MustSucceed("opc", "results", "logs", "get", "--insecure", "--addr", results_api, record_uuid).Stdout() if strings.Contains(resultsJsonData, "record not found") { testsuit.T.Errorf("Results log not found") } else { @@ -102,7 +136,7 @@ func VerifyResultsRecords(resourceType string) { var results_api string _, record_uuid, _ = GetResultsAnnotations(resourceType) results_api = GetResultsApi() - var results_record string = cmd.MustSucceed("opc", "results", "records", "get", "--insecure", "--addr", results_api, record_uuid).Stdout() + 
var results_record = cmd.MustSucceed("opc", "results", "records", "get", "--insecure", "--addr", results_api, record_uuid).Stdout() if strings.Contains(results_record, "record not found") { testsuit.T.Errorf("Results record not found") } else { diff --git a/pkg/operator/tektontriggers.go b/pkg/operator/tektontriggers.go index 643f54f3..b97ced5f 100644 --- a/pkg/operator/tektontriggers.go +++ b/pkg/operator/tektontriggers.go @@ -101,7 +101,7 @@ func TektonTriggerCRDelete(clients *clients.Clients, crNames utils.ResourceNames return false, err }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Timed out waiting on TektonTrigger to delete, Error: %v", err)) + testsuit.T.Fail(fmt.Errorf("timed out waiting on TektonTrigger to delete, Error: %v", err)) } if err := verifyNoTektonTriggerCR(clients); err != nil { @@ -115,7 +115,7 @@ func verifyNoTektonTriggerCR(clients *clients.Clients) error { return err } if len(triggers.Items) > 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any TektonTrigger exists") + return errors.New("unable to verify cluster-scoped resources are deleted if any TektonTrigger exists") } return nil } diff --git a/pkg/pipelines/clustertasks.go b/pkg/pipelines/ecosystem.go similarity index 64% rename from pkg/pipelines/clustertasks.go rename to pkg/pipelines/ecosystem.go index db48fa05..da85590a 100644 --- a/pkg/pipelines/clustertasks.go +++ b/pkg/pipelines/ecosystem.go @@ -12,66 +12,66 @@ import ( "k8s.io/apimachinery/pkg/util/wait" ) -func AssertClustertaskPresent(c *clients.Clients, clusterTaskName string) { +func AssertTaskPresent(c *clients.Clients, namespace string, taskName string) { err := wait.PollUntilContextTimeout(c.Ctx, config.APIRetry, config.ResourceTimeout, false, func(context.Context) (bool, error) { - log.Printf("Verifying if the clustertask %v is present", clusterTaskName) - _, err := c.ClustertaskClient.Get(c.Ctx, clusterTaskName, v1.GetOptions{}) + log.Printf("Verifying if the task %v is present", 
taskName) + _, err := c.Tekton.TektonV1().Tasks(namespace).Get(c.Ctx, taskName, v1.GetOptions{}) if err == nil { return true, nil } return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Clustertasks %v Expected: Present, Actual: Not Present, Error: %v", clusterTaskName, err)) + testsuit.T.Fail(fmt.Errorf("tasks %v Expected: Present, Actual: Not Present, Error: %v", taskName, err)) } else { - log.Printf("Clustertask %v is present", clusterTaskName) + log.Printf("Task %v is present", taskName) } } -func AssertClustertaskNotPresent(c *clients.Clients, clusterTaskName string) { +func AssertTaskNotPresent(c *clients.Clients, namespace string, taskName string) { err := wait.PollUntilContextTimeout(c.Ctx, config.APIRetry, config.ResourceTimeout, false, func(context.Context) (bool, error) { - log.Printf("Verifying if the clustertask %v is not present", clusterTaskName) - _, err := c.ClustertaskClient.Get(c.Ctx, clusterTaskName, v1.GetOptions{}) + log.Printf("Verifying if the task %v is not present", taskName) + _, err := c.Tekton.TektonV1().Tasks(namespace).Get(c.Ctx, taskName, v1.GetOptions{}) if err == nil { return false, nil } return true, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Clustertasks %v Expected: Not Present, Actual: Present, Error: %v", clusterTaskName, err)) + testsuit.T.Fail(fmt.Errorf("tasks %v Expected: Not Present, Actual: Present, Error: %v", taskName, err)) } else { - log.Printf("Clustertask %v is not present", clusterTaskName) + log.Printf("Task %v is not present", taskName) } } -func AssertTaskPresent(c *clients.Clients, namespace string, taskName string) { +func AssertStepActionPresent(c *clients.Clients, namespace string, stepActionName string) { err := wait.PollUntilContextTimeout(c.Ctx, config.APIRetry, config.ResourceTimeout, false, func(context.Context) (bool, error) { - log.Printf("Verifying if the task %v is present", taskName) - _, err := c.Tekton.TektonV1().Tasks(namespace).Get(c.Ctx, taskName, v1.GetOptions{}) + 
log.Printf("Verifying if the stepAction %v is present", stepActionName) + _, err := c.Tekton.TektonV1beta1().StepActions(namespace).Get(c.Ctx, stepActionName, v1.GetOptions{}) if err == nil { return true, nil } return false, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Tasks %v Expected: Present, Actual: Not Present, Error: %v", taskName, err)) + testsuit.T.Fail(fmt.Errorf("StepAction %v Expected: Present, Actual: Not Present, Error: %v", stepActionName, err)) } else { - log.Printf("Task %v is present", taskName) + log.Printf("StepAction %v is present", stepActionName) } } -func AssertTaskNotPresent(c *clients.Clients, namespace string, taskName string) { +func AssertStepActionNotPresent(c *clients.Clients, namespace string, stepActionName string) { err := wait.PollUntilContextTimeout(c.Ctx, config.APIRetry, config.ResourceTimeout, false, func(context.Context) (bool, error) { - log.Printf("Verifying if the task %v is not present", taskName) - _, err := c.Tekton.TektonV1().Tasks(namespace).Get(c.Ctx, taskName, v1.GetOptions{}) + log.Printf("Verifying if the stepAction %v is not present", stepActionName) + _, err := c.Tekton.TektonV1beta1().StepActions(namespace).Get(c.Ctx, stepActionName, v1.GetOptions{}) if err == nil { return false, nil } return true, nil }) if err != nil { - testsuit.T.Fail(fmt.Errorf("Tasks %v Expected: Not Present, Actual: Present, Error: %v", taskName, err)) + testsuit.T.Fail(fmt.Errorf("StepAction %v Expected: Not Present, Actual: Present, Error: %v", stepActionName, err)) } else { - log.Printf("Task %v is not present", taskName) + log.Printf("StepAction %v is not present", stepActionName) } } diff --git a/pkg/pipelines/helper.go b/pkg/pipelines/helper.go index 1cd3dfbd..ba808934 100644 --- a/pkg/pipelines/helper.go +++ b/pkg/pipelines/helper.go @@ -3,7 +3,6 @@ package pipelines import ( "bytes" "fmt" - "strings" "github.com/getgauge-contrib/gauge-go/testsuit" "github.com/openshift-pipelines/release-tests/pkg/clients" @@ -14,109 +13,6 
@@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// checkLabelPropagation checks that labels are correctly propagating from -// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkLabelPropagation(c *clients.Clients, namespace string, pipelineRunName string, tr *v1.TaskRun) { - // Our controllers add 4 labels automatically. If custom labels are set on - // the Pipeline, PipelineRun, or Task then the map will have to be resized. - labels := make(map[string]string, 4) - - // Check label propagation to PipelineRuns. - pr, err := c.PipelineRunClient.Get(c.Ctx, pipelineRunName, metav1.GetOptions{}) - if err != nil { - testsuit.T.Errorf("failed to get pipeline run for task run %s \n %v", tr.Name, err) - } - - p, err := c.PipelineClient.Get(c.Ctx, pr.Spec.PipelineRef.Name, metav1.GetOptions{}) - if err != nil { - testsuit.T.Errorf("failed to get pipeline for pipeline run %s \n %v", pr.Name, err) - } - - // By default, controller doesn't add any labels to Pipelines - for key, val := range p.ObjectMeta.Labels { - labels[key] = val - } - - // This label is added to every PipelineRun by the PipelineRun controller - labels[pipeline.PipelineLabelKey] = p.Name - AssertLabelsMatch(labels, pr.ObjectMeta.Labels) - - // Check label propagation to TaskRuns. 
- for key, val := range pr.ObjectMeta.Labels { - labels[key] = val - } - // This label is added to every TaskRun by the PipelineRun controller - labels[pipeline.PipelineRunLabelKey] = pr.Name - if tr.Spec.TaskRef != nil { - task, err := c.TaskClient.Get(c.Ctx, tr.Spec.TaskRef.Name, metav1.GetOptions{}) - if err != nil { - testsuit.T.Errorf("failed to get task for task run %s \n %v", tr.Name, err) - } - - // By default, controller doesn't add any labels to Tasks - for key, val := range task.ObjectMeta.Labels { - labels[key] = val - } - // This label is added to TaskRuns that reference a Task by the TaskRun controller - labels[pipeline.TaskLabelKey] = task.Name - } - AssertLabelsMatch(labels, tr.ObjectMeta.Labels) - - // PodName is "" if a retry happened and pod is deleted - // This label is added to every Pod by the TaskRun controller - if tr.Status.PodName != "" { - // Check label propagation to Pods. - pod := GetPodForTaskRun(c, namespace, tr) - // This label is added to every Pod by the TaskRun controller - labels[pipeline.TaskRunLabelKey] = tr.Name - AssertLabelsMatch(labels, pod.ObjectMeta.Labels) - } -} - -// checkAnnotationPropagation checks that annotations are correctly propagating from -// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkAnnotationPropagation(c *clients.Clients, namespace string, pipelineRunName string, tr *v1.TaskRun) { - annotations := make(map[string]string) - - // Check annotation propagation to PipelineRuns. 
- pr, err := c.PipelineRunClient.Get(c.Ctx, pipelineRunName, metav1.GetOptions{}) - if err != nil { - testsuit.T.Errorf("failed to get pipeline run for task run %s \n %v", tr.Name, err) - } - - p, err := c.PipelineClient.Get(c.Ctx, pr.Spec.PipelineRef.Name, metav1.GetOptions{}) - if err != nil { - testsuit.T.Errorf("failed to get pipeline for pipeline run %s \n %v", pr.Name, err) - } - - for key, val := range p.ObjectMeta.Annotations { - annotations[key] = val - } - AssertAnnotationsMatch(annotations, pr.ObjectMeta.Annotations) - - // Check annotation propagation to TaskRuns. - for key, val := range pr.ObjectMeta.Annotations { - // Annotations created by Chains are created after task runs finish - if !strings.HasPrefix(key, "chains.tekton.dev") && !strings.HasPrefix(key, "results.tekton.dev") { - annotations[key] = val - } - } - if tr.Spec.TaskRef != nil { - task, err := c.TaskClient.Get(c.Ctx, tr.Spec.TaskRef.Name, metav1.GetOptions{}) - if err != nil { - testsuit.T.Errorf("failed to get task for task run %s \n %v", tr.Name, err) - } - for key, val := range task.ObjectMeta.Annotations { - annotations[key] = val - } - } - AssertAnnotationsMatch(annotations, tr.ObjectMeta.Annotations) - - // Check annotation propagation to Pods. - pod := GetPodForTaskRun(c, namespace, tr) - AssertAnnotationsMatch(annotations, pod.ObjectMeta.Annotations) -} - func GetPodForTaskRun(c *clients.Clients, namespace string, tr *v1.TaskRun) *corev1.Pod { // The Pod name has a random suffix, so we filter by label to find the one we care about. 
pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(c.Ctx, metav1.ListOptions{ diff --git a/pkg/pipelines/pipelines.go b/pkg/pipelines/pipelines.go index 851da820..e003e26f 100644 --- a/pkg/pipelines/pipelines.go +++ b/pkg/pipelines/pipelines.go @@ -17,6 +17,7 @@ import ( "github.com/openshift-pipelines/release-tests/pkg/cmd" "github.com/openshift-pipelines/release-tests/pkg/config" "github.com/openshift-pipelines/release-tests/pkg/k8s" + "github.com/openshift-pipelines/release-tests/pkg/store" "github.com/openshift-pipelines/release-tests/pkg/wait" "github.com/tektoncd/cli/pkg/cli" clipr "github.com/tektoncd/cli/pkg/cmd/pipelinerun" @@ -32,7 +33,7 @@ import ( var prGroupResource = schema.GroupVersionResource{Group: "tekton.dev", Resource: "pipelineruns"} -func validatePipelineRunForSuccessStatus(c *clients.Clients, prname, labelCheck, namespace string) { +func validatePipelineRunForSuccessStatus(c *clients.Clients, prname, namespace string) { // Verify status of PipelineRun (wait for it) err := wait.WaitForPipelineRunState(c, prname, wait.PipelineRunSucceed(prname), "PipelineRunCompleted") if err != nil { @@ -54,29 +55,6 @@ func validatePipelineRunForSuccessStatus(c *clients.Clients, prname, labelCheck, } log.Printf("pipelineRun: %s is successful under namespace : %s", prname, namespace) - - if strings.ToLower(labelCheck) == "yes" || strings.ToLower(labelCheck) == "y" { - log.Println("Check for events, labels & annotations") - actualTaskrunList, err := c.TaskRunClient.List(c.Ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prname)}) - if err != nil { - testsuit.T.Errorf("failed to list task runs for pipeline run %s \n %v", prname, err) - } - - actualTaskRunNames := []string{} - for _, tr := range actualTaskrunList.Items { - actualTaskRunNames = append(actualTaskRunNames, tr.GetName()) - log.Printf("Checking that labels were propagated correctly for TaskRun %s", tr.Name) - trCopy := tr - checkLabelPropagation(c, namespace, 
prname, &trCopy) - log.Printf("Checking that annotations were propagated correctly for TaskRun %s", tr.Name) - checkAnnotationPropagation(c, namespace, prname, &trCopy) - } - - matchKinds := map[string][]string{"PipelineRun": {prname}, "TaskRun": actualTaskRunNames} - log.Printf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", len(actualTaskRunNames)+1, matchKinds) - - // To-do fix: collect matching events - } } func validatePipelineRunForFailedStatus(c *clients.Clients, prname, namespace string) { @@ -198,7 +176,7 @@ func validatePipelineRunCancel(c *clients.Clients, prname, namespace string) { wg.Wait() } -func ValidatePipelineRun(c *clients.Clients, prname, status, labelCheck, namespace string) { +func ValidatePipelineRun(c *clients.Clients, prname, status, namespace string) { var err error pr, err := c.PipelineRunClient.Get(c.Ctx, prname, metav1.GetOptions{}) if err != nil { @@ -209,7 +187,7 @@ func ValidatePipelineRun(c *clients.Clients, prname, status, labelCheck, namespa switch { case strings.Contains(strings.ToLower(status), "success"): log.Printf("validating pipeline run %s for success state...", prname) - validatePipelineRunForSuccessStatus(c, pr.GetName(), labelCheck, namespace) + validatePipelineRunForSuccessStatus(c, pr.GetName(), namespace) case strings.Contains(strings.ToLower(status), "fail"): log.Printf("validating pipeline run %s for failure state...", prname) validatePipelineRunForFailedStatus(c, pr.GetName(), namespace) @@ -284,7 +262,7 @@ func AssertForNoNewPipelineRunCreation(c *clients.Clients, namespace string) { } func AssertNumberOfPipelineruns(c *clients.Clients, namespace, numberOfPr, timeoutSeconds string) { - log.Printf("Verifying if %s number of pipelinerun are present", numberOfPr) + log.Printf("Verifying if %s pipelineruns are present", numberOfPr) timeoutSecondsInt, _ := strconv.Atoi(timeoutSeconds) err := w.PollUntilContextTimeout(c.Ctx, config.APIRetry, time.Second*time.Duration(timeoutSecondsInt), 
false, func(context.Context) (bool, error) { prlist, err := c.PipelineRunClient.List(c.Ctx, metav1.ListOptions{}) @@ -296,12 +274,12 @@ func AssertNumberOfPipelineruns(c *clients.Clients, namespace, numberOfPr, timeo }) if err != nil { prlist, _ := c.PipelineRunClient.List(c.Ctx, metav1.ListOptions{}) - testsuit.T.Fail(fmt.Errorf("Error: Expected %v number of pipelineruns but found %v number of pipelineruns", numberOfPr, len(prlist.Items))) + testsuit.T.Fail(fmt.Errorf("error: Expected %v pipelineruns but found %v pipelineruns: %s", numberOfPr, len(prlist.Items), err)) } } func AssertNumberOfTaskruns(c *clients.Clients, namespace, numberOfTr, timeoutSeconds string) { - log.Printf("Verifying if %s number of taskruns are present", numberOfTr) + log.Printf("Verifying if %s taskruns are present", numberOfTr) timeoutSecondsInt, _ := strconv.Atoi(timeoutSeconds) err := w.PollUntilContextTimeout(c.Ctx, config.APIRetry, time.Second*time.Duration(timeoutSecondsInt), false, func(context.Context) (bool, error) { trlist, err := c.TaskRunClient.List(c.Ctx, metav1.ListOptions{}) @@ -313,7 +291,7 @@ func AssertNumberOfTaskruns(c *clients.Clients, namespace, numberOfTr, timeoutSe }) if err != nil { trlist, _ := c.TaskRunClient.List(c.Ctx, metav1.ListOptions{}) - testsuit.T.Fail(fmt.Errorf("Error: Expected %v number of taskruns but found %v number of taskruns", numberOfTr, len(trlist.Items))) + testsuit.T.Fail(fmt.Errorf("error: Expected %v taskruns but found %v taskruns: %s", numberOfTr, len(trlist.Items), err)) } } func AssertPipelinesPresent(c *clients.Clients, namespace string) { @@ -335,7 +313,7 @@ func AssertPipelinesPresent(c *clients.Clients, namespace string) { }) if err != nil { p, _ := pclient.List(c.Ctx, metav1.ListOptions{}) - testsuit.T.Fail(fmt.Errorf("Expected: %v pipelines present in namespace %v, Actual: %v pipelines present in namespace %v , Error: %v", expectedNumberOfPipelines, namespace, len(p.Items), namespace, err)) + testsuit.T.Fail(fmt.Errorf("expected: %v 
pipelines present in namespace %v, Actual: %v pipelines present in namespace %v , Error: %v", expectedNumberOfPipelines, namespace, len(p.Items), namespace, err)) } log.Printf("Pipelines are present in namespace %v", namespace) } @@ -352,7 +330,7 @@ func AssertPipelinesNotPresent(c *clients.Clients, namespace string) { }) if err != nil { p, _ := pclient.List(c.Ctx, metav1.ListOptions{}) - testsuit.T.Fail(fmt.Errorf("Expected: %v number of pipelines present in namespace %v, Actual: %v number of pipelines present in namespace %v , Error: %v", 0, namespace, len(p.Items), namespace, err)) + testsuit.T.Fail(fmt.Errorf("expected: %v number of pipelines present in namespace %v, Actual: %v number of pipelines present in namespace %v , Error: %v", 0, namespace, len(p.Items), namespace, err)) } log.Printf("Pipelines are present in namespace %v", namespace) } @@ -398,3 +376,36 @@ func GetLatestPipelinerun(c *clients.Clients, namespace string) (string, error) return prs.Items[0].Name, nil } + +func CheckLogVersion(c *clients.Clients, binary, namespace string) { + prname, err := GetLatestPipelinerun(store.Clients(), store.Namespace()) + if err != nil { + testsuit.T.Fail(fmt.Errorf("failed to get PipelineRun: %v", err)) + return + } + // Get PipelineRun logs + logsBuffer, err := getPipelinerunLogs(c, prname, namespace) + if err != nil { + testsuit.T.Fail(fmt.Errorf("failed to get PipelineRun logs: %v", err)) + return + } + + switch binary { + case "tkn-pac": + expectedVersion := os.Getenv("PAC_VERSION") + if !strings.Contains(logsBuffer.String(), expectedVersion) { + testsuit.T.Fail(fmt.Errorf("tkn-pac Version %s not found in logs:\n%s ", expectedVersion, logsBuffer)) + } + case "tkn": + expectedVersion := os.Getenv("TKN_CLIENT_VERSION") + if !strings.Contains(logsBuffer.String(), "Client version:") { + testsuit.T.Fail(fmt.Errorf("tkn client version not found! 
\nlogs:%s", logsBuffer)) + return + } + if !strings.Contains(logsBuffer.String(), expectedVersion) { + testsuit.T.Fail(fmt.Errorf("tkn Version %s not found in logs:\n%s ", expectedVersion, logsBuffer)) + } + default: + testsuit.T.Fail(fmt.Errorf("unknown binary or client")) + } +} diff --git a/pkg/pipelines/taskrun.go b/pkg/pipelines/taskrun.go index 25b1949c..c6d2585d 100644 --- a/pkg/pipelines/taskrun.go +++ b/pkg/pipelines/taskrun.go @@ -130,16 +130,16 @@ func ValidateTaskRunLabelPropogation(c *clients.Clients, trname, namespace strin testsuit.T.Errorf("failed to get task run %s in namespace %s \n %v", matched_tr, namespace, err) } - for key, val := range tr.ObjectMeta.Labels { + for key, val := range tr.Labels { labels[key] = val } - AssertLabelsMatch(labels, tr.ObjectMeta.Labels) + AssertLabelsMatch(labels, tr.Labels) if tr.Status.PodName != "" { pod := GetPodForTaskRun(c, namespace, tr) // This label is added to every Pod by the TaskRun controller labels[pipeline.TaskRunLabelKey] = tr.Name - AssertLabelsMatch(labels, pod.ObjectMeta.Labels) + AssertLabelsMatch(labels, pod.Labels) gauge.WriteMessage("Labels: \n\n %+v", createKeyValuePairs(labels)) } } diff --git a/pkg/tkn/tkn.go b/pkg/tkn/tkn.go index 5a176ee0..5f2fb8e1 100644 --- a/pkg/tkn/tkn.go +++ b/pkg/tkn/tkn.go @@ -106,6 +106,29 @@ func AssertClientVersion(binary string) { } } +func AssertServerVersion(binary string) { + var commandResult, unexpectedVersion string + + switch binary { + case "opc": + commandResult = cmd.MustSucceed("/tmp/opc", "version", "--server").Stdout() + components := [4]string{"Chains version", "Pipeline version", "Triggers version", "Operator version"} + expectedVersions := [4]string{os.Getenv("CHAINS_VERSION"), os.Getenv("PIPELINE_VERSION"), os.Getenv("TRIGGERS_VERSION"), os.Getenv("OPERATOR_VERSION")} + splittedCommandResult := strings.Split(commandResult, "\n") + for i := 0; i < 4; i++ { + if strings.Contains(splittedCommandResult[i], components[i]) { + if 
!strings.Contains(splittedCommandResult[i], expectedVersions[i]) { + unexpectedVersion = splittedCommandResult[i] + testsuit.T.Errorf("%s has an unexpected version: %s. Expected: %s", components[i], unexpectedVersion, expectedVersions[i]) + } + } + } + default: + testsuit.T.Errorf("Unknown binary or client") + } + +} + func ValidateQuickstarts() { cmd.MustSucceed("oc", "get", "consolequickstart", "install-app-and-associate-pipeline").Stdout() cmd.MustSucceed("oc", "get", "consolequickstart", "configure-pipeline-metrics").Stdout() diff --git a/pkg/triggers/helper.go b/pkg/triggers/helper.go index 6501098c..c83fe7e4 100644 --- a/pkg/triggers/helper.go +++ b/pkg/triggers/helper.go @@ -14,7 +14,6 @@ import ( "github.com/getgauge-contrib/gauge-go/testsuit" "github.com/openshift-pipelines/release-tests/pkg/config" - resource "github.com/openshift-pipelines/release-tests/pkg/config" "github.com/openshift-pipelines/release-tests/pkg/store" ) @@ -40,11 +39,11 @@ func CreateHTTPClient() *http.Client { // CreateHTTPSClient for connection re-use func CreateHTTPSClient() *http.Client { // Load client cert - cert, err := tls.LoadX509KeyPair(resource.Path("testdata/triggers/certs/server.crt"), resource.Path("testdata/triggers/certs/server.key")) + cert, err := tls.LoadX509KeyPair(config.Path("testdata/triggers/certs/server.crt"), config.Path("testdata/triggers/certs/server.key")) if err != nil { log.Fatal(err) } - caCert, err := os.ReadFile(resource.Path("testdata/triggers/certs/ca.crt")) + caCert, err := os.ReadFile(config.Path("testdata/triggers/certs/ca.crt")) if err != nil { log.Fatal(err) } diff --git a/pkg/triggers/triggers.go b/pkg/triggers/triggers.go index 9d09aca6..37f8d054 100644 --- a/pkg/triggers/triggers.go +++ b/pkg/triggers/triggers.go @@ -58,6 +58,13 @@ func ExposeEventListner(c *clients.Clients, elname, namespace string) string { return GetRoute(elname, namespace) } +func ExposeDeploymentConfig(c *clients.Clients, elname, port, namespace string) string { + 
cmd.MustSucceed("oc", "expose", "dc/"+elname, "-n", namespace, "--target-port="+port) + cmd.MustSucceed("oc", "expose", "svc", elname, "-n", namespace, "--target-port="+port) + + return elname +} + func ExposeEventListenerForTLS(c *clients.Clients, elname, namespace string) string { svcName, portName := getServiceNameAndPort(c, elname, namespace) domain := getDomain() @@ -191,7 +198,7 @@ func AssertElResponse(c *clients.Clients, resp *http.Response, elname, namespace EventListener: elname, Namespace: namespace, } - + //nolint:errcheck defer resp.Body.Close() var gotBody sink.Response err := json.NewDecoder(resp.Body).Decode(&gotBody) @@ -274,6 +281,7 @@ func GetRoute(elname, namespace string) string { if err != nil { testsuit.T.Fail(err) } + //nolint:errcheck defer file.Close() if _, err := file.WriteString(serverCert); err != nil { diff --git a/pkg/wait/wait.go b/pkg/wait/wait.go index f6ffbc40..c0170d88 100644 --- a/pkg/wait/wait.go +++ b/pkg/wait/wait.go @@ -127,10 +127,13 @@ func Succeed(name string) ConditionAccessorFn { return func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { - if c.Status == corev1.ConditionTrue { + switch c.Status { + case corev1.ConditionTrue: return true, nil - } else if c.Status == corev1.ConditionFalse { + case corev1.ConditionFalse: return true, fmt.Errorf("%q failed", name) + default: + return false, nil } } return false, nil @@ -143,10 +146,13 @@ func Failed(name string) ConditionAccessorFn { return func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { - if c.Status == corev1.ConditionTrue { + switch c.Status { + case corev1.ConditionTrue: return true, fmt.Errorf("%q succeeded", name) - } else if c.Status == corev1.ConditionFalse { + case corev1.ConditionFalse: return true, nil + default: + return false, nil } } return false, nil @@ -159,13 +165,16 @@ func FailedWithReason(reason, name string) ConditionAccessorFn { return 
func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { - if c.Status == corev1.ConditionFalse { + switch c.Status { + case corev1.ConditionFalse: if c.Reason == reason { return true, nil } return true, fmt.Errorf("%q completed with the wrong reason, was: %s, expected: %s", name, reason, c.Reason) - } else if c.Status == corev1.ConditionTrue { + case corev1.ConditionTrue: return true, fmt.Errorf("%q completed successfully, should have been failed with reason %q", name, reason) + default: + return false, nil } } return false, nil @@ -178,13 +187,16 @@ func FailedWithMessage(message, name string) ConditionAccessorFn { return func(ca apis.ConditionAccessor) (bool, error) { c := ca.GetCondition(apis.ConditionSucceeded) if c != nil { - if c.Status == corev1.ConditionFalse { + switch c.Status { + case corev1.ConditionFalse: if strings.Contains(c.Message, message) { return true, nil } return true, fmt.Errorf("%q completed with the wrong message: %s", name, c.Message) - } else if c.Status == corev1.ConditionTrue { + case corev1.ConditionTrue: return true, fmt.Errorf("%q completed successfully, should have been failed with message %q", name, message) + default: + return false, nil } } return false, nil diff --git a/specs/chains/chains.spec b/specs/chains/chains.spec index a67e0bb5..ab7c3869 100644 --- a/specs/chains/chains.spec +++ b/specs/chains/chains.spec @@ -27,7 +27,6 @@ Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Update the TektonConfig with taskrun format as "in-toto" taskrun storage as "oci" oci storage as "oci" transparency mode as "true" * Store Cosign public key in file * Verify that image registry variable is exported diff --git a/specs/clustertasks/clustertask-multiarch.spec b/specs/clustertasks/clustertask-multiarch.spec deleted file mode 100644 index fefca260..00000000 --- a/specs/clustertasks/clustertask-multiarch.spec +++ /dev/null @@ -1,117 
+0,0 @@ -PIPELINES-14 -# Verify Clustertasks E2E spec - -Pre condition: - * Validate Operator should be installed - -## jib-maven pipelinerun: PIPELINES-17-TC01 -Tags: linux/amd64, clustertasks, non-admin, jib-maven, sanity -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/jib-maven.yaml | - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/pipelineruns/jib-maven.yaml | - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |jib-maven-run |successful|no | - -## jib-maven P&Z pipelinerun: PIPELINES-17-TC02 -Tags: linux/ppc64le, linux/s390x, linux/arm64, clustertasks, non-admin, jib-maven, sanity -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/jib-maven-pz.yaml | - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/pipelineruns/jib-maven-pz.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |jib-maven-pz-run |successful|no | - -## kn-apply pipelinerun: PIPELINES-17-TC03 -Tags: e2e, linux/amd64, clustertasks, non-admin, kn-apply -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|--------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/kn-apply.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - 
|----|-----------------|----------|-----------------------| - |1 |kn-apply-run |successful|no | - -## kn-apply p&z pipelinerun: PIPELINES-17-TC04 -Tags: e2e, linux/ppc64le, linux/s390x, clustertasks, non-admin, kn-apply -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/kn-apply-multiarch.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |kn-apply-pz-run |successful|no | - -## kn pipelinerun: PIPELINES-17-TC05 -Tags: e2e, linux/amd64, clustertasks, non-admin, kn -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|--------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/kn.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |kn-run |successful|no | - -## kn p&z pipelinerun: PIPELINES-17-TC06 -Tags: e2e, linux/ppc64le, linux/s390x, clustertasks, non-admin, kn -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-----------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/kn-pz.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |kn-pz-run |successful|no | \ No newline at end of file diff --git a/specs/clustertasks/clustertask.spec b/specs/clustertasks/clustertask.spec deleted file mode 100644 index 8aefbf55..00000000 --- 
a/specs/clustertasks/clustertask.spec +++ /dev/null @@ -1,281 +0,0 @@ -PIPELINES-16 -# Verify Clustertasks E2E spec - -Pre condition: - * Validate Operator should be installed - -## buildah pipelinerun: PIPELINES-16-TC01 -Tags: e2e, clustertasks, non-admin, buildah, sanity -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/buildah.yaml | - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/pipelineruns/buildah.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |buildah-run |successful|no | - -## buildah disconnected pipelinerun: PIPELINES-16-TC02 -Tags: disconnected-e2e, clustertasks, non-admin, buildah -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|--------------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/buildah.yaml | - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/pipelineruns/buildah-disconnected.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |buildah-disconnected-run|successful|no | - -## git-cli pipelinerun: PIPELINES-16-TC03 -Tags: e2e, clustertasks, non-admin, git-cli -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/git-cli.yaml | - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/pipelineruns/git-cli.yaml| - * 
Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |git-cli-run |successful|no | - -## git-cli read private repo pipelinerun: PIPELINES-16-TC04 -Tags: e2e, clustertasks, non-admin, git-cli -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-----------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/git-cli-read-private.yaml| - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/secrets/ssh-key.yaml | - * Link secret "ssh-key" to service account "pipeline" - * Create - |S.NO|resource_dir | - |----|--------------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/git-cli-read-private.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |git-cli-read-private-run|successful|no | - -## git-cli read private repo using different service account pipelinerun: PIPELINES-16-TC05 -Tags: e2e, clustertasks, non-admin, git-cli -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-----------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/git-cli-read-private.yaml| - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/secrets/ssh-key.yaml | - |4 |testdata/v1/clustertask/serviceaccount/ssh-sa.yaml | - |5 |testdata/v1/clustertask/rolebindings/ssh-sa-scc.yaml | - * Link secret "ssh-key" to service account "ssh-sa" - * Create - |S.NO|resource_dir | - |----|-----------------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/git-cli-read-private-sa.yaml| - * Verify 
pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|---------------------------|----------|-----------------------| - |1 |git-cli-read-private-sa-run|successful|no | - -## maven pipelinerun: PIPELINES-16-TC06 -Tags: e2e, clustertasks, non-admin, maven -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelines/maven.yaml | - |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/configmaps/maven-settings.yaml| - |4 |testdata/v1/clustertask/pipelineruns/maven.yaml | - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |maven-run |successful|no | - -## openshift-client pipelinerun: PIPELINES-16-TC07 -Tags: e2e, clustertasks, non-admin, openshift-client -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|----------------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/openshift-client.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|--------------------|----------|-----------------------| - |1 |openshift-client-run|successful|no | - -## skopeo-copy pipelinerun: PIPELINES-16-TC08 -Tags: e2e, clustertasks, non-admin, skopeo-copy -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-----------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/skopeo-copy.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - 
|----|-----------------|----------|-----------------------| - |1 |skopeo-copy-run |successful|no | - -## tkn pipelinerun: PIPELINES-16-TC09 -Tags: e2e, clustertasks, non-admin, tkn -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|---------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/tkn.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |tkn-run |successful|no | - -## tkn pac pipelinerun: PIPELINES-16-TC10 -Tags: e2e, clustertasks, non-admin, tkn -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/tkn-pac.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |tkn-pac-run |successful|no | - -## tkn version pipelinerun: PIPELINES-16-TC11 -Tags: e2e, clustertasks, non-admin, tkn -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|-----------------------------------------------------| - |1 |testdata/v1/clustertask/pipelineruns/tkn-version.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |tkn-version-run |successful|no | - -## git-clone read private repo taskrun PIPELINES-16-TC12 -Tags: e2e, clustertasks, non-admin, git-clone, sanity -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical -CustomerScenario: yes - -Steps: - * Verify 
ServiceAccount "pipeline" exist - * Create - | S.NO | resource_dir | - |------|---------------------------------------------------------------| - | 1 | testdata/v1/clustertask/pipelines/git-clone-read-private.yaml | - | 2 | testdata/pvc/pvc.yaml | - | 3 | testdata/v1/clustertask/secrets/ssh-key.yaml | - * Link secret "ssh-key" to service account "pipeline" - * Create - | S.NO | resource_dir | - |------|-----------------------------------------------------------------| - | 1 | testdata/v1/clustertask/pipelineruns/git-clone-read-private.yaml| - * Verify pipelinerun - | S.NO | pipeline_run_name | status | check_label_propagation | - |------|-------------------------------------|------------|-------------------------| - | 1 | git-clone-read-private-pipeline-run | successful | no | - -## git-clone read private repo using different service account taskrun PIPELINES-16-TC13 -Tags: e2e, clustertasks, non-admin, git-clone -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Verify ServiceAccount "pipeline" exist - * Create - | S.NO | resource_dir | - |------|---------------------------------------------------------------| - | 1 | testdata/v1/clustertask/pipelines/git-clone-read-private.yaml | - | 2 | testdata/pvc/pvc.yaml | - | 3 | testdata/v1/clustertask/secrets/ssh-key.yaml | - | 4 | testdata/v1/clustertask/serviceaccount/ssh-sa.yaml | - | 5 | testdata/v1/clustertask/rolebindings/ssh-sa-scc.yaml | - * Link secret "ssh-key" to service account "ssh-sa" - * Create - | S.NO | resource_dir | - |------|--------------------------------------------------------------------| - | 1 | testdata/v1/clustertask/pipelineruns/git-clone-read-private-sa.yaml| - * Verify pipelinerun - | S.NO | pipeline_run_name | status | check_label_propagation | - |------|----------------------------------------|------------|-------------------------| - | 1 | git-clone-read-private-pipeline-sa-run | successful | no | \ No newline at end of file diff --git 
a/specs/ecosystem/ecosystem-multiarch.spec b/specs/ecosystem/ecosystem-multiarch.spec new file mode 100644 index 00000000..c3ed9105 --- /dev/null +++ b/specs/ecosystem/ecosystem-multiarch.spec @@ -0,0 +1,111 @@ +PIPELINES-32 +# Verify ecosystem E2E spec + +Pre condition: + * Validate Operator should be installed + +## jib-maven pipelinerun: PIPELINES-32-TC01 +Tags: linux/amd64, ecosystem, non-admin, jib-maven, sanity +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create + |S.NO|resource_dir | + |----|------------------------------------------------------| + |1 |testdata/ecosystem/pipelines/jib-maven.yaml | + |2 |testdata/pvc/pvc.yaml | + |3 |testdata/ecosystem/pipelineruns/jib-maven.yaml | + * Verify pipelinerun + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |jib-maven-run |successful| + +## jib-maven P&Z pipelinerun: PIPELINES-32-TC02 +Tags: linux/ppc64le, linux/s390x, linux/arm64, ecosystem, non-admin, jib-maven, sanity +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create + |S.NO|resource_dir | + |----|------------------------------------------------------| + |1 |testdata/ecosystem/pipelines/jib-maven-pz.yaml | + |2 |testdata/pvc/pvc.yaml | + |3 |testdata/ecosystem/pipelineruns/jib-maven-pz.yaml| + * Verify pipelinerun + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |jib-maven-pz-run |successful| + +## kn-apply pipelinerun: PIPELINES-32-TC03 +Tags: e2e, linux/amd64, ecosystem, non-admin, kn-apply +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create + |S.NO|resource_dir | + |----|--------------------------------------------------| + |1 |testdata/ecosystem/pipelineruns/kn-apply.yaml| + * Verify pipelinerun + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |kn-apply-run |successful| + +## kn-apply p&z pipelinerun: PIPELINES-32-TC04 
+Tags: e2e, linux/ppc64le, linux/s390x, ecosystem, non-admin, kn-apply +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create + |S.NO|resource_dir | + |----|------------------------------------------------------------| + |1 |testdata/ecosystem/pipelineruns/kn-apply-multiarch.yaml| + * Verify pipelinerun + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |kn-apply-pz-run |successful| + +## kn pipelinerun: PIPELINES-32-TC05 +Tags: e2e, linux/amd64, ecosystem, non-admin, kn +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create + |S.NO|resource_dir | + |----|--------------------------------------------| + |1 |testdata/ecosystem/pipelineruns/kn.yaml| + * Verify pipelinerun + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |kn-run |successful| + +## kn p&z pipelinerun: PIPELINES-32-TC06 +Tags: e2e, linux/ppc64le, linux/s390x, ecosystem, non-admin, kn +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create + |S.NO|resource_dir | + |----|-----------------------------------------------| + |1 |testdata/ecosystem/pipelineruns/kn-pz.yaml| + * Verify pipelinerun + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |kn-pz-run |successful| \ No newline at end of file diff --git a/specs/clustertasks/clustertask-s2i.spec b/specs/ecosystem/ecosystem-s2i.spec similarity index 70% rename from specs/clustertasks/clustertask-s2i.spec rename to specs/ecosystem/ecosystem-s2i.spec index a4b92cc8..dca4f117 100644 --- a/specs/clustertasks/clustertask-s2i.spec +++ b/specs/ecosystem/ecosystem-s2i.spec @@ -1,160 +1,157 @@ -PIPELINES-14 -# Verify Clustertasks E2E spec +PIPELINES-33 +# Verify Ecosystem E2E spec Pre condition: * Validate Operator should be installed - -## S2I nodejs pipelinerun: PIPELINES-14-TC01 -Tags: e2e, clustertasks, non-admin, s2i, sanity +## S2I nodejs 
pipelinerun: PIPELINES-33-TC01 +Tags: e2e, ecosystem, non-admin, s2i, sanity Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create - |S.NO|resource_dir | - |----|--------------------------------------------------------| - |1 |testdata/v1beta1/pipelinerun/s2i-nodejs-pipelinerun.yaml| + |S.NO|resource_dir | + |----|-------------------------------------------------------------| + |1 |testdata/ecosystem/pipelines/nodejs-ex-git.yaml | + |2 |testdata/pvc/pvc.yaml | + |3 |testdata/ecosystem/deploymentconfigs/nodejs-ex-git.yaml | + |4 |testdata/ecosystem/imagestreams/nodejs-ex-git.yaml | + |5 |testdata/ecosystem/pipelineruns/nodejs-ex-git.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_lable_propagation| - |----|-----------------|-----------|-----------------------| - |1 |nodejs-ex-git-pr |successful |no | - -## S2I dotnet pipelinerun: PIPELINES-14-TC02 -Tags: e2e, clustertasks, non-admin, s2i, skip_linux/ppc64le + |S.NO|pipeline_run_name|status | + |----|-----------------|-----------| + |1 |nodejs-ex-git-pr |successful | + * Expose Deployment config "nodejs-ex-git" on port "3000" + * Get route url of the route "nodejs-ex-git" + * Validate that route URL contains "See Also" + +## S2I dotnet pipelinerun: PIPELINES-33-TC02 +Tags: e2e, ecosystem, non-admin, s2i, skip_linux/ppc64le Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-dotnet.yaml| + |1 |testdata/ecosystem/pipelines/s2i-dotnet.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "dotnet" from namespace "openshift" and store to variable "dotnet-tags" * Start and verify dotnet pipeline "s2i-dotnet-pipeline" with values stored in variable "dotnet-tags" with workspace 
"name=source,claimName=shared-pvc" -## S2I golang pipelinerun: PIPELINES-14-TC03 -Tags: e2e, clustertasks, non-admin, s2i +## S2I golang pipelinerun: PIPELINES-33-TC03 +Tags: e2e, ecosystem, non-admin, s2i, sanity Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-golang.yaml| + |1 |testdata/ecosystem/pipelines/s2i-go.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "golang" from namespace "openshift" and store to variable "golang-tags" * Start and verify pipeline "s2i-go-pipeline" with param "VERSION" with values stored in variable "golang-tags" with workspace "name=source,claimName=shared-pvc" -## S2I java pipelinerun: PIPELINES-14-TC04 -Tags: e2e, clustertasks, non-admin, s2i, sanity +## S2I java pipelinerun: PIPELINES-33-TC04 +Tags: ecosystem, non-admin, s2i Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-java.yaml | + |1 |testdata/ecosystem/pipelines/s2i-java.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "java" from namespace "openshift" and store to variable "java-tags" * Start and verify pipeline "s2i-java-pipeline" with param "VERSION" with values stored in variable "java-tags" with workspace "name=source,claimName=shared-pvc" -## S2I nodejs pipelinerun: PIPELINES-14-TC05 -Tags: e2e, clustertasks, non-admin, s2i +## S2I nodejs pipelinerun: PIPELINES-33-TC05 +Tags: e2e, ecosystem, non-admin, s2i Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | 
|----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-nodejs.yaml| + |1 |testdata/ecosystem/pipelines/s2i-nodejs.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "nodejs" from namespace "openshift" and store to variable "nodejs-tags" * Start and verify pipeline "s2i-nodejs-pipeline" with param "VERSION" with values stored in variable "nodejs-tags" with workspace "name=source,claimName=shared-pvc" -## S2I perl pipelinerun: PIPELINES-14-TC06 -Tags: e2e, clustertasks, non-admin, s2i +## S2I perl pipelinerun: PIPELINES-33-TC06 +Tags: e2e, ecosystem, non-admin, s2i Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-perl.yaml | + |1 |testdata/ecosystem/pipelines/s2i-perl.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "perl" from namespace "openshift" and store to variable "perl-tags" * Start and verify pipeline "s2i-perl-pipeline" with param "VERSION" with values stored in variable "perl-tags" with workspace "name=source,claimName=shared-pvc" -## S2I php pipelinerun: PIPELINES-14-TC07 -Tags: e2e, clustertasks, non-admin, s2i +## S2I php pipelinerun: PIPELINES-33-TC07 +Tags: e2e, ecosystem, non-admin, s2i Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|---------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-php.yaml| + |1 |testdata/ecosystem/pipelines/s2i-php.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "php" from namespace "openshift" and store to variable "php-tags" * Start and verify pipeline "s2i-php-pipeline" with param "VERSION" with values stored in variable "php-tags" with workspace 
"name=source,claimName=shared-pvc" -## S2I python pipelinerun: PIPELINES-14-TC08 -Tags: e2e, clustertasks, non-admin, s2i +## S2I python pipelinerun: PIPELINES-33-TC08 +Tags: e2e, ecosystem, non-admin, s2i Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-python.yaml| + |1 |testdata/ecosystem/pipelines/s2i-python.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "python" from namespace "openshift" and store to variable "python-tags" * Start and verify pipeline "s2i-python-pipeline" with param "VERSION" with values stored in variable "python-tags" with workspace "name=source,claimName=shared-pvc" -## S2I ruby pipelinerun: PIPELINES-14-TC09 -Tags: e2e, clustertasks, non-admin, s2i +## S2I ruby pipelinerun: PIPELINES-33-TC09 +Tags: e2e, ecosystem, non-admin, s2i Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|----------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-ruby.yaml| + |1 |testdata/ecosystem/pipelines/s2i-ruby.yaml | |2 |testdata/pvc/pvc.yaml | * Get tags of the imagestream "ruby" from namespace "openshift" and store to variable "ruby-tags" * Start and verify pipeline "s2i-ruby-pipeline" with param "VERSION" with values stored in variable "ruby-tags" with workspace "name=source,claimName=shared-pvc" \ No newline at end of file diff --git a/specs/ecosystem/ecosystem.spec b/specs/ecosystem/ecosystem.spec index 47fd4b59..29cc1dee 100644 --- a/specs/ecosystem/ecosystem.spec +++ b/specs/ecosystem/ecosystem.spec @@ -12,7 +12,6 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | 
|----|-------------------------------------------------| @@ -20,11 +19,10 @@ Steps: |2 |testdata/pvc/pvc.yaml | |3 |testdata/ecosystem/pipelineruns/buildah.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |buildah-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |buildah-run |successful| -## needs more tweaks ## buildah disconnected pipelinerun: PIPELINES-29-TC02 Tags: disconnected-e2e, ecosystem, tasks, non-admin, buildah Component: Pipelines @@ -33,7 +31,6 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|--------------------------------------------------------------| @@ -41,9 +38,9 @@ Steps: |2 |testdata/pvc/pvc.yaml | |3 |testdata/ecosystem/pipelineruns/buildah-disconnected.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |buildah-disconnected-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |buildah-disconnected-run|successful| ## git-cli pipelinerun: PIPELINES-29-TC03 Tags: e2e, ecosystem, tasks, non-admin, git-cli @@ -53,7 +50,6 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-------------------------------------------------| @@ -61,9 +57,9 @@ Steps: |2 |testdata/pvc/pvc.yaml | |3 |testdata/ecosystem/pipelineruns/git-cli.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |git-cli-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |git-cli-run |successful| ## git-cli read private repo pipelinerun: PIPELINES-29-TC04 Tags: e2e, ecosystem, 
non-admin, git-cli @@ -73,22 +69,21 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------------| - |1 |testdata/ecosystem/pipelines/git-cli-read-private.yaml| + |1 |testdata/ecosystem/pipelines/git-cli-read-private.yaml | |2 |testdata/pvc/pvc.yaml | - |3 |testdata/ecosystem/secrets/ssh-key.yaml | + |3 |testdata/ecosystem/secrets/ssh-key.yaml | * Link secret "ssh-key" to service account "pipeline" * Create |S.NO|resource_dir | |----|--------------------------------------------------------------| |1 |testdata/ecosystem/pipelineruns/git-cli-read-private.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |git-cli-read-private-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |git-cli-read-private-run|successful| ## git-cli read private repo using different service account pipelinerun: PIPELINES-29-TC05 Tags: e2e, ecosystem, non-admin, git-cli @@ -98,24 +93,23 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------------| - |1 |testdata/ecosystem/pipelines/git-cli-read-private.yaml| + |1 |testdata/ecosystem/pipelines/git-cli-read-private.yaml | |2 |testdata/pvc/pvc.yaml | - |3 |testdata/ecosystem/secrets/ssh-key.yaml | - |4 |testdata/ecosystem/serviceaccount/ssh-sa.yaml | - |5 |testdata/ecosystem/rolebindings/ssh-sa-scc.yaml | + |3 |testdata/ecosystem/secrets/ssh-key.yaml | + |4 |testdata/ecosystem/serviceaccount/ssh-sa.yaml | + |5 |testdata/ecosystem/rolebindings/ssh-sa-scc.yaml | * Link secret "ssh-key" to service account "ssh-sa" * Create |S.NO|resource_dir | |----|-----------------------------------------------------------------| - |1 
|testdata/ecosystem/pipelineruns/git-cli-read-private-sa.yaml| + |1 |testdata/ecosystem/pipelineruns/git-cli-read-private-sa.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|---------------------------|----------|-----------------------| - |1 |git-cli-read-private-sa-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|---------------------------|----------| + |1 |git-cli-read-private-sa-run|successful| ## git-clone read private repo taskrun PIPELINES-29-TC06 Tags: e2e, ecosystem, non-admin, git-clone, sanity @@ -126,22 +120,21 @@ Importance: Critical CustomerScenario: yes Steps: - * Verify ServiceAccount "pipeline" exist * Create | S.NO | resource_dir | |------|---------------------------------------------------------------| - | 1 | testdata/ecosystem/pipelines/git-clone-read-private.yaml | + | 1 | testdata/ecosystem/pipelines/git-clone-read-private.yaml | | 2 | testdata/pvc/pvc.yaml | - | 3 | testdata/ecosystem/secrets/ssh-key.yaml | + | 3 | testdata/ecosystem/secrets/ssh-key.yaml | * Link secret "ssh-key" to service account "pipeline" * Create | S.NO | resource_dir | |------|-----------------------------------------------------------------| - | 1 | testdata/ecosystem/pipelineruns/git-clone-read-private.yaml| + | 1 | testdata/ecosystem/pipelineruns/git-clone-read-private.yaml | * Verify pipelinerun - | S.NO | pipeline_run_name | status | check_label_propagation | - |------|-------------------------------------|------------|-------------------------| - | 1 | git-clone-read-private-pipeline-run | successful | no | + | S.NO | pipeline_run_name | status | + |------|-------------------------------------|------------| + | 1 | git-clone-read-private-pipeline-run | successful | ## git-clone read private repo using different service account taskrun PIPELINES-29-TC07 Tags: e2e, ecosystem, non-admin, git-clone @@ -151,24 +144,23 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" 
exist * Create | S.NO | resource_dir | |------|---------------------------------------------------------------| - | 1 | testdata/ecosystem/pipelines/git-clone-read-private.yaml | + | 1 | testdata/ecosystem/pipelines/git-clone-read-private.yaml | | 2 | testdata/pvc/pvc.yaml | - | 3 | testdata/ecosystem/secrets/ssh-key.yaml | - | 4 | testdata/ecosystem/serviceaccount/ssh-sa.yaml | - | 5 | testdata/ecosystem/rolebindings/ssh-sa-scc.yaml | + | 3 | testdata/ecosystem/secrets/ssh-key.yaml | + | 4 | testdata/ecosystem/serviceaccount/ssh-sa.yaml | + | 5 | testdata/ecosystem/rolebindings/ssh-sa-scc.yaml | * Link secret "ssh-key" to service account "ssh-sa" * Create | S.NO | resource_dir | |------|--------------------------------------------------------------------| | 1 | testdata/ecosystem/pipelineruns/git-clone-read-private-sa.yaml| * Verify pipelinerun - | S.NO | pipeline_run_name | status | check_label_propagation | - |------|----------------------------------------|------------|-------------------------| - | 1 | git-clone-read-private-pipeline-sa-run | successful | no | + | S.NO | pipeline_run_name | status | + |------|----------------------------------------|------------| + | 1 | git-clone-read-private-pipeline-sa-run | successful | ## openshift-client pipelinerun: PIPELINES-29-TC08 Tags: e2e, ecosystem, tasks, non-admin, openshift-client @@ -178,15 +170,14 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|----------------------------------------------------------| |1 |testdata/ecosystem/pipelineruns/openshift-client.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|--------------------|----------|-----------------------| - |1 |openshift-client-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|--------------------|----------| + |1 |openshift-client-run|successful| ## skopeo-copy pipelinerun: PIPELINES-29-TC09 Tags: e2e, ecosystem, 
tasks, non-admin, skopeo-copy @@ -196,15 +187,14 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------| |1 |testdata/ecosystem/pipelineruns/skopeo-copy.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |skopeo-copy-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |skopeo-copy-run |successful| ## tkn pipelinerun: PIPELINES-29-TC10 Tags: e2e, ecosystem, tasks, non-admin, tkn @@ -214,15 +204,14 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|---------------------------------------------| |1 |testdata/ecosystem/pipelineruns/tkn.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |tkn-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |tkn-run |successful| ## tkn pac pipelinerun: PIPELINES-29-TC11 Tags: e2e, ecosystem, tasks, non-admin, tkn @@ -232,15 +221,15 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-------------------------------------------------| |1 |testdata/ecosystem/pipelineruns/tkn-pac.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |tkn-pac-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |tkn-pac-run |successful| + * Verify "tkn-pac" version from the pipelinerun logs ## tkn version pipelinerun: PIPELINES-29-TC12 Tags: e2e, ecosystem, tasks, non-admin, tkn @@ -250,15 +239,15 @@ Type: Functional Importance: Critical Steps: - 
* Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------| |1 |testdata/ecosystem/pipelineruns/tkn-version.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |tkn-version-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |tkn-version-run |successful| + * Verify "tkn" version from the pipelinerun logs ## maven pipelinerun: PIPELINES-29-TC13 Tags: e2e, ecosystem, tasks, non-admin, maven @@ -268,18 +257,33 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| |1 |testdata/ecosystem/pipelines/maven.yaml | |2 |testdata/pvc/pvc.yaml | - |3 |testdata/v1/clustertask/configmaps/maven-settings.yaml| + |3 |testdata/ecosystem/configmaps/maven-settings.yaml | |4 |testdata/ecosystem/pipelineruns/maven.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |maven-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |maven-run |successful| -## need to add from clustertasks -## s2i, kn, kn-apply, tkn-pac, to be added. 
\ No newline at end of file +## Test the functionality of step action resolvers: PIPELINES-29-TC14 +Tags: e2e, sanity, ecosystem, non-admin +Component: Resolvers +Level: Integration +Type: Functional +Importance: High + +Steps: + * Create + |S.NO|resource_dir | + |----|-----------------------------------------------------------------| + |1 |testdata/ecosystem/tasks/git-clone-stepaction.yaml | + |2 |testdata/pvc/pvc.yaml | + |3 |testdata/ecosystem/pipelineruns/git-clone-stepaction.yaml | + * Verify pipelinerun + |S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |git-clone-stepaction-run |successful | diff --git a/specs/manualapprovalgate/manual-approval-gate.spec b/specs/manualapprovalgate/manual-approval-gate.spec index 19d62899..0825b1c2 100644 --- a/specs/manualapprovalgate/manual-approval-gate.spec +++ b/specs/manualapprovalgate/manual-approval-gate.spec @@ -10,7 +10,6 @@ Importance: Critical Steps: * Validate manual approval gate deployment - * Verify ServiceAccount "pipeline" exist * Create | S.NO | resource_dir | |------|----------------------------------------------------------| @@ -28,7 +27,6 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create | S.NO | resource_dir | |------|----------------------------------------------------------| diff --git a/specs/olm.spec b/specs/olm.spec index 2943706b..3d961ad9 100644 --- a/specs/olm.spec +++ b/specs/olm.spec @@ -20,16 +20,16 @@ Steps: |------|--------------------------------| | 1 | testdata/hub/tektonhub.yaml | * Create secrets for Tekton Results + * Create signing-secrets for Tekton Chains + * Configure GitHub token for git resolver in TektonConfig + * Configure the bundles resolver + * Enable console plugin * Apply in namespace "openshift-pipelines" | S.NO | resource_dir | |------|--------------------------------| | 1 | testdata/pvc/tekton-logs.yaml | | 2 | testdata/results/result.yaml | * Create Results route - * 
Create signing-secrets for Tekton Chains - * Configure GitHub token for git resolver in TektonConfig - * Configure the bundles resolver - * Enable console plugin * Validate pipelines deployment * Validate triggers deployment * Validate PAC deployment diff --git a/specs/operator/addon.spec b/specs/operator/addon.spec index 7c515927..20890b04 100644 --- a/specs/operator/addon.spec +++ b/specs/operator/addon.spec @@ -4,71 +4,56 @@ PIPELINES-15 Pre condition: * Validate Operator should be installed -## Disable/Enable community clustertasks: PIPELINES-15-TC01 -Tags: e2e, integration, clustertasks, admin, addon, sanity +## Disable/Enable resolverTasks: PIPELINES-15-TC06 +Tags: e2e, integration, resolvertasks, admin, addon, sanity Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "true" and expect message "" - * "community" clustertasks are "present" - * "tkn,openshift-client" clustertasks are "present" - * Update addon config with clusterTasks as "true" communityClustertasks as "false" and pipelineTemplates as "true" and expect message "" - * "community" clustertasks are "not present" - * "tkn,openshift-client" clustertasks are "present" - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "true" and expect message "" - * "community" clustertasks are "present" - * "tkn,openshift-client" clustertasks are "present" + * Update addon config with resolverTasks as "false" and expect message "" + * Tasks "s2i-java" are "not present" in namespace "openshift-pipelines" + * Update addon config with resolverTasks as "true" and expect message "" + * Tasks "s2i-java" are "present" in namespace "openshift-pipelines" -## Disable/Enable clustertasks: PIPELINES-15-TC02 -Tags: e2e, integration, clustertasks, admin, addon, sanity +## Disable/Enable resolverTasks with additional Tasks: 
PIPELINES-15-TC07 +Tags: e2e, integration, resolvertasks, admin, addon Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "true" and expect message "" - * "community" clustertasks are "present" - * "tkn,openshift-client" clustertasks are "present" - * Assert pipelines are "present" in "openshift" namespace - * Update addon config with clusterTasks as "false" communityClustertasks as "false" and pipelineTemplates as "false" and expect message "" - * "community" clustertasks are "not present" - * "tkn,openshift-client" clustertasks are "not present" - * Assert pipelines are "not present" in "openshift" namespace - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "true" and expect message "" - * "community" clustertasks are "present" - * "tkn,openshift-client" clustertasks are "present" - * Assert pipelines are "present" in "openshift" namespace + * Update addon config with resolverTasks as "true" and expect message "" + * Tasks "s2i-java" are "present" in namespace "openshift-pipelines" + * Apply in namespace "openshift-pipelines" + |S.NO|resource_dir | + |----|-----------------------------------------------------| + |1 |testdata/ecosystem/tasks/hello.yaml | + * Tasks "hello" are "present" in namespace "openshift-pipelines" + * Update addon config with resolverTasks as "false" and expect message "" + * Tasks "s2i-java" are "not present" in namespace "openshift-pipelines" + * Tasks "hello" are "present" in namespace "openshift-pipelines" + * Update addon config with resolverTasks as "true" and expect message "" + * Tasks "s2i-java" are "present" in namespace "openshift-pipelines" + * Tasks "hello" are "present" in namespace "openshift-pipelines" -## Disable/Enable pipeline templates: PIPELINES-15-TC03 -Tags: e2e, integration, clustertasks, admin, addon, sanity +## 
Disable/Enable pipeline templates: PIPELINES-15-TC08 +Tags: e2e, integration, resolvertasks, admin, addon, sanity Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "true" and expect message "" + * Update addon config with resolverTasks as "true" and pipelineTemplates as "true" and expect message "" * Assert pipelines are "present" in "openshift" namespace - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "false" and expect message "" + * Update addon config with resolverTasks as "true" and pipelineTemplates as "false" and expect message "" * Assert pipelines are "not present" in "openshift" namespace - * Update addon config with clusterTasks as "true" communityClustertasks as "true" and pipelineTemplates as "true" and expect message "" + * Update addon config with resolverTasks as "true" and pipelineTemplates as "true" and expect message "" * Assert pipelines are "present" in "openshift" namespace -## Enable community cluster tasks when clustertask is disabled: PIPELINES-15-TC04 -Tags: e2e, integration, negative, admin, addon -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical -Pos/Neg: Negative - -Steps: - * Update addon config with clusterTasks as "false" communityClustertasks as "true" and pipelineTemplates as "false" and expect message "validation failed: communityClusterTasks cannot be true if clusterTask is false" - ## Enable pipeline templates when clustertask is disabled: PIPELINES-15-TC05 Tags: e2e, integration, negative, admin, addon Component: Pipelines @@ -78,40 +63,24 @@ Importance: Critical Pos/Neg: Negative Steps: - * Update addon config with clusterTasks as "false" communityClustertasks as "false" and pipelineTemplates as "true" and expect message "validation failed: pipelineTemplates cannot be true if clusterTask is 
false" + * Update addon config with resolverTasks as "false" and pipelineTemplates as "true" and expect message "validation failed: pipelineTemplates cannot be true if resolverTask is false" -## Disable/Enable resolverTasks: PIPELINES-15-TC06 -Tags: e2e, integration, resolverTasks, admin, addon, sanity +## Verify versioned ecosystem tasks: PIPELINES-15-TC09 +Tags: e2e, integration, addon Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Update addon config with resolverTasks as "false" and expect message "" - * Tasks "s2i-java" are "not present" in namespace "openshift-pipelines" - * Update addon config with resolverTasks as "true" and expect message "" - * Tasks "s2i-java" are "present" in namespace "openshift-pipelines" + * Verify versioned ecosystem tasks -## Disable/Enable resolverTasks with additional Tasks: PIPELINES-15-TC07 -Tags: e2e, integration, resolverTasks, admin, addon +## Verify versioned stepaction tasks: PIPELINES-15-TC010 +Tags: e2e, integration, addon Component: Pipelines Level: Integration Type: Functional Importance: Critical Steps: - * Update addon config with resolverTasks as "true" and expect message "" - * Tasks "s2i-java" are "present" in namespace "openshift-pipelines" - * Apply in namespace "openshift-pipelines" - |S.NO|resource_dir | - |----|-----------------------------------------------------| - |1 |testdata/ecosystem/tasks/hello.yaml | - * Tasks "hello" are "present" in namespace "openshift-pipelines" - * Update addon config with resolverTasks as "false" and expect message "" - * Tasks "s2i-java" are "not present" in namespace "openshift-pipelines" - * Tasks "hello" are "present" in namespace "openshift-pipelines" - * Update addon config with resolverTasks as "true" and expect message "" - * Tasks "s2i-java" are "present" in namespace "openshift-pipelines" - * Tasks "hello" are "present" in namespace "openshift-pipelines" - + * Verify versioned ecosystem step actions \ No newline at end of file 
diff --git a/specs/operator/auto-prune.spec b/specs/operator/auto-prune.spec index fd36e2b7..faf7465d 100644 --- a/specs/operator/auto-prune.spec +++ b/specs/operator/auto-prune.spec @@ -16,7 +16,6 @@ Importance: Critical This scenario tests auto prune functionality for taskrun resource Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -27,7 +26,7 @@ Steps: |4 |testdata/pruner/task/taskrun-for-pruner.yaml | * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * "5" pipelinerun(s) should be present within "120" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -42,7 +41,6 @@ Importance: Critical This scenario tests auto prune functionality for pipelinerun resource Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -54,7 +52,7 @@ Steps: * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "pipelinerun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "2" pipelinerun(s) should be present within "120" seconds - * "7" taskrun(s) should be present within "120" seconds + * "7" taskrun(s) should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -69,7 +67,6 @@ Importance: Critical This scenario tests auto prune functionality for pipelinerun and taskrun resources Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner 
configuration from config CR * Create |S.NO|resource_dir | @@ -81,7 +78,7 @@ Steps: * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "2" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -95,7 +92,6 @@ Importance: Critical This scenario tests auto prune functionality with global strategy keep-since Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -113,7 +109,7 @@ Steps: * Update pruner config "without" keep "" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "with" keep-since "2" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "5" pipelinerun(s) should be present within "120" seconds - * "10" taskrun(s) should be present within "120" seconds + * "10" taskrun(s) should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -129,7 +125,6 @@ Pruning should not happen for the resources of a namespace with annotation opera Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -142,10 +137,10 @@ Steps: * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "5" pipelinerun(s) should be present within "120" seconds - * "10" taskrun(s) should be present within 
"120" seconds + * "10" taskrun(s) should be present within "180" seconds * Remove annotation "operator.tekton.dev/prune.skip" from namespace * "2" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -161,7 +156,6 @@ This scenario tests auto prune functionality for a namespace with annotation ope Only taskruns should get pruned for a namespace with annotation operator.tekton.dev/prune.resources=taskrun Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -174,7 +168,7 @@ Steps: * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "5" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * Remove annotation "operator.tekton.dev/prune.resources" from namespace * "2" pipelinerun(s) should be present within "120" seconds * Remove auto pruner configuration from config CR @@ -192,7 +186,6 @@ This scenario tests auto prune functionality for a namespace with annotation ope Both taskruns and pipelineruns should get pruned for a namespace with annotation operator.tekton.dev/prune.resources=taskrun. 
Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -205,7 +198,7 @@ Steps: * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "2" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * Remove annotation "operator.tekton.dev/prune.resources" from namespace * Create |S.NO|resource_dir | @@ -213,7 +206,7 @@ Steps: |1 |testdata/pruner/pipeline/pipelinerun-for-pruner.yaml| |2 |testdata/pruner/task/taskrun-for-pruner.yaml | * "7" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -230,7 +223,6 @@ This scenario tests auto prune functionality for a namespace with annotation ope If the globaly strategy and the strategy of namespace is same, no need to define operator.tekton.dev/prune.strategy Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -243,7 +235,7 @@ Steps: * Update pruner config "with" keep "2" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "3" pipelinerun(s) should be present within "120" seconds - * "3" taskrun(s) should be present within "120" seconds + * "3" taskrun(s) should be present within "180" seconds * Remove annotation "operator.tekton.dev/prune.keep" from namespace * "2" pipelinerun(s) should be present within "120" seconds * Remove auto 
pruner configuration from config CR @@ -261,7 +253,6 @@ This scenario tests auto prune functionality for a namespace with annotation ope If the globaly strategy and the strategy of namespace is same, no need to define operator.tekton.dev/prune.strategy Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -280,7 +271,7 @@ Steps: * Update pruner config "without" keep "" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "with" keep-since "10" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "5" pipelinerun(s) should be present within "120" seconds - * "10" taskrun(s) should be present within "120" seconds + * "10" taskrun(s) should be present within "180" seconds * Remove annotation "operator.tekton.dev/prune.keep-since" from namespace * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -297,7 +288,6 @@ This scenario tests auto prune functionality for a namespace with annotation ope If the globaly strategy and the strategy of namespace is different, the operator.tekton.dev/prune.strategy=strategy is must Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -310,10 +300,10 @@ Steps: * Update pruner config "without" keep "" schedule "*/1 * * * *" resources "pipelinerun,taskrun" and "with" keep-since "10" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "5" pipelinerun(s) should be present within "120" seconds - * "10" taskrun(s) should be present within "120" seconds + * "10" taskrun(s) should be present within "180" seconds * Annotate namespace with "operator.tekton.dev/prune.strategy=keep" * "2" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) 
should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -326,7 +316,6 @@ Importance: Critical This scenrio tests auto prune functionality for a namespace with different schedule by annotating namespace with operator.tekton.dev/prune.schedule Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Create |S.NO|resource_dir | @@ -339,7 +328,7 @@ Steps: * Update pruner config "with" keep "2" schedule "*/8 * * * *" resources "pipelinerun,taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace * "2" pipelinerun(s) should be present within "120" seconds - * "2" taskrun(s) should be present within "120" seconds + * "2" taskrun(s) should be present within "180" seconds * Remove annotation "operator.tekton.dev/prune.schedule" from namespace * Sleep for "60" seconds * Create @@ -348,7 +337,7 @@ Steps: |1 |testdata/pruner/pipeline/pipelinerun-for-pruner.yaml| |2 |testdata/pruner/task/taskrun-for-pruner.yaml | * "7" pipelinerun(s) should be present within "120" seconds - * "12" taskrun(s) should be present within "120" seconds + * "12" taskrun(s) should be present within "180" seconds * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace @@ -361,7 +350,6 @@ Importance: Critical This scenrio tests validation of auto pruner config Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Update pruner config with invalid data "with" keep "2" schedule "*/8 * * * *" resources "pipelinerun,taskrun" and "with" keep-since "2" and expect error message "validation failed: expected exactly one, got both: spec.pruner.keep, spec.pruner.keep-since" * Update pruner config with invalid data "with" keep "2" schedule 
"*/8 * * * *" resources "pipelinerun,taskrunas" and "without" keep-since "" and expect error message "validation failed: invalid value: taskrunas: spec.pruner.resources[1]" @@ -377,7 +365,6 @@ Importance: Critical This scenrio tests if auto prune job is not getting re-created for addition of random annotation to namespace. Test case fails if the cronjob gets re-created for addition of random annotation to namepsace. Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Update pruner config "with" keep "2" schedule "10 * * * *" resources "pipelinerun,taskrun" and "without" keep-since "" * Assert if cronjob with prefix "tekton-resource-pruner" is "present" in target namespace @@ -423,7 +410,6 @@ Importance: Critical CustomerScenario: yes Steps: - * Verify ServiceAccount "pipeline" exist * Remove auto pruner configuration from config CR * Assert if cronjob with prefix "tekton-resource-pruner" is "not present" in target namespace * Create diff --git a/specs/operator/post-upgrade.spec b/specs/operator/post-upgrade.spec index 8cfc80f1..6d24beaf 100644 --- a/specs/operator/post-upgrade.spec +++ b/specs/operator/post-upgrade.spec @@ -18,16 +18,16 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/triggers/github-ctb/push.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |pipelinerun-git-push-ctb|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |pipelinerun-git-push-ctb|successful| * Get route for eventlistener "listener-triggerref" * Mock post event to "github" interceptor with event-type "pull_request", payload "testdata/triggers/triggersCRD/pull-request.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status 
|check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |parallel-pipelinerun |successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |parallel-pipelinerun |successful| * Get route for eventlistener "bitbucket-listener" * Mock post event to "bitbucket" interceptor with event-type "refs_changed", payload "testdata/triggers/bitbucket/refs-change-event.json", with TLS "false" * Assert eventlistener response @@ -36,18 +36,6 @@ Steps: |----|-----------------|-------| |1 |bitbucket-run |Failure| -## Verify S2I nodejs pipeline after upgrade: PIPELINES-19-TC02 -Tags: post-upgrade, e2e, clustertasks, non-admin, s2i -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Switch to project "releasetest-upgrade-s2i" - * Get tags of the imagestream "nodejs" from namespace "openshift" and store to variable "nodejs-tags" - * Start and verify pipeline "s2i-nodejs-pipeline" with param "VERSION" with values stored in variable "nodejs-tags" with workspace "name=source,claimName=shared-pvc" - ## Verify Event listener with TLS after upgrade: PIPELINES-19-TC03 Tags: post-upgrade, tls, triggers, admin, e2e, sanity Component: Triggers @@ -61,9 +49,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "true" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-------------------|----------|-----------------------| - |1 |simple-pipeline-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|-------------------|----------| + |1 |simple-pipeline-run|successful| ## Verify secret is linked to SA even after upgrade: PIPELINES-19-TC04 Tags: post-upgrade, e2e, clustertasks, non-admin, git-clone, sanity @@ -79,8 +67,20 @@ Steps: * Create | S.NO | resource_dir | 
|------|-----------------------------------------------------------------------| - | 1 | testdata/v1beta1/clustertask/pipelineruns/git-clone-read-private.yaml | + | 1 | testdata/ecosystem/pipelineruns/git-clone-read-private.yaml | * Verify pipelinerun - | S.NO | pipeline_run_name | status | check_label_propagation | - |------|-------------------------------------|------------|-------------------------| - | 1 | git-clone-read-private-pipeline-run | successful | no | \ No newline at end of file + | S.NO | pipeline_run_name | status | + |------|-------------------------------------|------------| + | 1 | git-clone-read-private-pipeline-run | successful | + +## Verify S2I golang pipeline after upgrade: PIPELINES-19-TC05 +Tags: post-upgrade, e2e, clustertasks, non-admin, s2i +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Switch to project "releasetest-upgrade-s2i" + * Get tags of the imagestream "golang" from namespace "openshift" and store to variable "golang-tags" + * Start and verify pipeline "s2i-go-pipeline" with param "VERSION" with values stored in variable "golang-tags" with workspace "name=source,claimName=shared-pvc" \ No newline at end of file diff --git a/specs/operator/pre-upgrade.spec b/specs/operator/pre-upgrade.spec index 3cb662ce..fb209ef5 100644 --- a/specs/operator/pre-upgrade.spec +++ b/specs/operator/pre-upgrade.spec @@ -24,9 +24,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/triggers/github-ctb/push.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |pipelinerun-git-push-ctb|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |pipelinerun-git-push-ctb|successful| * Delete "pipelinerun" named "pipelinerun-git-push-ctb" * Create |S.NO|resource_dir | 
@@ -40,9 +40,9 @@ Steps: * Mock post event to "github" interceptor with event-type "pull_request", payload "testdata/triggers/triggersCRD/pull-request.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |parallel-pipelinerun |successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |parallel-pipelinerun |successful| * Delete "pipelinerun" named "parallel-pipelinerun" * Create |S.NO|resource_dir | @@ -58,22 +58,6 @@ Steps: |1 |bitbucket-run |Failure| * Delete "taskrun" named "bitbucket-run" -## Setup S2I nodejs pipeline pre upgrade: PIPELINES-18-TC02 -Tags: pre-upgrade, e2e, clustertasks, non-admin, s2i -Component: Pipelines -Level: Integration -Type: Functional -Importance: Critical - -Steps: - * Create project "releasetest-upgrade-s2i" - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------| - |1 |testdata/v1beta1/clustertask/pipelines/s2i-nodejs.yaml| - |2 |testdata/pvc/pvc.yaml | - ## Setup Eventlistener with TLS enabled pre upgrade: PIPELINES-18-TC03 Tags: pre-upgrade, tls, triggers, admin, e2e, sanity Component: Triggers @@ -95,9 +79,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "true" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-------------------|----------|-----------------------| - |1 |simple-pipeline-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|-------------------|----------| + |1 |simple-pipeline-run|successful| * Delete "pipelinerun" named "simple-pipeline-run" ## Setup link secret to pipeline SA PIPELINES-18-TC04 @@ -114,16 +98,32 @@ Steps: * Create | S.NO | resource_dir | 
|------|--------------------------------------------------------------------| - | 1 | testdata/v1beta1/clustertask/pipelines/git-clone-read-private.yaml | + | 1 | testdata/ecosystem/pipelines/git-clone-read-private.yaml | | 2 | testdata/pvc/pvc.yaml | - | 3 | testdata/v1/clustertask/secrets/ssh-key.yaml | + | 3 | testdata/ecosystem/secrets/ssh-key.yaml | * Link secret "ssh-key" to service account "pipeline" * Create | S.NO | resource_dir | |------|-----------------------------------------------------------------------| - | 1 | testdata/v1beta1/clustertask/pipelineruns/git-clone-read-private.yaml | + | 1 | testdata/ecosystem/pipelineruns/git-clone-read-private.yaml | * Verify pipelinerun - | S.NO | pipeline_run_name | status | check_label_propagation | - |------|-------------------------------------|------------|-------------------------| - | 1 | git-clone-read-private-pipeline-run | successful | no | - * Delete "pipelinerun" named "git-clone-read-private-pipeline-run" \ No newline at end of file + | S.NO | pipeline_run_name | status | + |------|-------------------------------------|------------| + | 1 | git-clone-read-private-pipeline-run | successful | + * Delete "pipelinerun" named "git-clone-read-private-pipeline-run" + +## Setup S2I golang pipeline pre upgrade: PIPELINES-18-TC05 +Tags: pre-upgrade, e2e, clustertasks, non-admin, s2i +Component: Pipelines +Level: Integration +Type: Functional +Importance: Critical + +Steps: + * Create project "releasetest-upgrade-s2i" + * Verify ServiceAccount "pipeline" exist + * Create + |S.NO|resource_dir | + |----|------------------------------------------------------| + |1 |testdata/ecosystem/pipelines/s2i-go.yaml| + |2 |testdata/pvc/pvc.yaml | \ No newline at end of file diff --git a/specs/pac/README.md b/specs/pac/README.md deleted file mode 100644 index fcc26631..00000000 --- a/specs/pac/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# PAC E2E tests -## _Pipelines-as-code_ -Pipelines as code is a project allowing you to define 
your CI/CD using Tekton PipelineRuns and Tasks in a file located in your source control management (SCM) system, such as GitHub or GitLab. This file is then used to automatically create a pipeline for a Pull Request or a Push to a branch. - -## _Settingup PAC in Gitlab_ - -- Create a New project in gitlab.com -- Change the visibility of the project to Public -- Set the main branch as unprotected branch -- Copy the project ID by clicking on three dots in project root directory and`export GITLAB_PROJECT_ID=` -- Click on your profile under `preferences` Under `User Settings --> Access tokens` -- Create a New Personal Access Token and `export GITLAB_TOKEN=` -- Create a new Public Group in GitLab and Copy the only the Group name from URL e.g: From GitLab URL `https://gitlab.com/groups/test324345` Copy only the group name `test324345` and `export GITLAB_GROUP_NAMESPACE=` -- Enter any WebhookSecret to be used for GitLab webhook `export GITLAB_WEBHOOK_TOKEN=` - -## Running PAC E2E tests -Export the following Env Variables -``` -export GITLAB_TOKEN= -export GITLAB_PROJECT_ID= -export GITLAB_GROUP_NAMESPACE= -export GITLAB_WEBHOOK_TOKEN= -``` - -To run pac e2e tests... 
- -``` -gauge run --log-level=debug --verbose --tags e2e specs/pac/pac-gitlab.spec -``` diff --git a/specs/pac/pac-gitlab.spec b/specs/pac/pac-gitlab.spec index 21a4cca8..5f2ec6b4 100644 --- a/specs/pac/pac-gitlab.spec +++ b/specs/pac/pac-gitlab.spec @@ -12,6 +12,5 @@ This scenario tests configuring PAC in Public GitLab project Steps: * Configure GitLab token for PAC tests - * Verify ServiceAccount "pipeline" exist * Create Smee deployment * Configure GitLab repo and validate pipelinerun diff --git a/specs/pipelines/bundles-resolver.spec b/specs/pipelines/bundles-resolver.spec index 84098450..9a2b910e 100644 --- a/specs/pipelines/bundles-resolver.spec +++ b/specs/pipelines/bundles-resolver.spec @@ -12,15 +12,14 @@ Type: Functional Importance: High Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------------------------| |1 |testdata/resolvers/pipelineruns/bundles-resolver-pipelinerun.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation | - |----|-----------------------------------|--------------------------------------| - |1 |bundles-resolver-pipelinerun |successful |no | + |S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |bundles-resolver-pipelinerun |successful | ## Test the functionality of bundles resolver with parameter: PIPELINES-25-TC02 Tags: e2e, sanity @@ -30,12 +29,11 @@ Type: Functional Importance: High Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|---------------------------------------------------------------------------| |1 |testdata/resolvers/pipelineruns/bundles-resolver-pipelinerun-param.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation | - |----|-----------------------------------|--------------------------------------| - |1 |bundles-resolver-pipelinerun-param |successful |no | \ No newline at end of file + 
|S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |bundles-resolver-pipelinerun-param |successful | \ No newline at end of file diff --git a/specs/pipelines/cluster-resolvers.spec b/specs/pipelines/cluster-resolvers.spec index d072b8b1..188833c8 100644 --- a/specs/pipelines/cluster-resolvers.spec +++ b/specs/pipelines/cluster-resolvers.spec @@ -1,6 +1,19 @@ PIPELINES-23 # Cluster resolvers spec +Pre condition: + * Create project "releasetest-tasks" + * Apply + |S.NO|resource_dir | + |----|------------------------------------------------------------| + |1 |testdata/resolvers/tasks/resolver-task.yaml | + |2 |testdata/resolvers/tasks/resolver-task2.yaml | + * Create project "releasetest-pipelines" + * Apply + |S.NO|resource_dir | + |----|------------------------------------------------------------| + |1 |testdata/resolvers/pipelines/resolver-pipeline.yaml | + ## Checking the functionality of cluster resolvers#1: PIPELINES-23-TC01 Tags: e2e, sanity Component: Resolvers @@ -9,32 +22,16 @@ Type: Functional Importance: High Steps: - * Create project "releasetest-tasks" - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------------| - |1 |testdata/resolvers/tasks/resolver-task2.yaml | - * Create project "releasetest-pipelines" - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------------| - |1 |testdata/resolvers/pipelines/resolver-pipeline.yaml | - * Create project "releasetest-pipelineruns" - * Verify ServiceAccount "pipeline" exist + * Switch to autogenerated namespace * Create |S.NO|resource_dir | |----|--------------------------------------------------------------------------| |1 |testdata/resolvers/pipelineruns/resolver-pipelinerun.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation | - 
|----|-----------------------------------|--------------------------------------| - |1 |resolver-pipelinerun |successful |no | - * Delete project "releasetest-tasks" - * Delete project "releasetest-pipelines" - * Delete project "releasetest-pipelineruns" - + |S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |resolver-pipelinerun |successful | + ## Checking the functionality of cluster resolvers#2: PIPELINES-23-TC02 Tags: e2e Component: Resolvers @@ -43,22 +40,17 @@ Type: Functional Importance: High Steps: - * Create project "releasetest-tasks" - * Verify ServiceAccount "pipeline" exist - * Create - |S.NO|resource_dir | - |----|------------------------------------------------------------| - |1 |testdata/resolvers/tasks/resolver-task.yaml | - * Create project "releasetest-pipelineruns" - * Verify ServiceAccount "pipeline" exist + * Switch to autogenerated namespace * Create |S.NO|resource_dir | |----|--------------------------------------------------------------------------| |1 |testdata/resolvers/pipelines/resolver-pipeline-same-ns.yaml | |2 |testdata/resolvers/pipelineruns/resolver-pipelinerun-same-ns.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation | - |----|-----------------------------------|--------------------------------------| - |1 |resolver-pipelinerun-same-ns |successful |no | - * Delete project "releasetest-tasks" - * Delete project "releasetest-pipelineruns" \ No newline at end of file + |S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |resolver-pipelinerun-same-ns |successful | + +Teardown: + * Delete project "releasetest-tasks" + * Delete project "releasetest-pipelines" \ No newline at end of file diff --git a/specs/pipelines/fail.spec b/specs/pipelines/fail.spec index 31dd01fa..cf129cea 100644 --- a/specs/pipelines/fail.spec +++ b/specs/pipelines/fail.spec @@ -23,9 +23,9 @@ Steps: 
|----|-------------------------------------------| |1 |testdata/negative/v1beta1/pipelinerun.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|----------------------|-------|-----------------------| - |1 |output-pipeline-run-vb|Failure|no | + |S.NO|pipeline_run_name |status | + |----|----------------------|-------| + |1 |output-pipeline-run-vb|Failure| ## Run Task with a non-existent ServiceAccount: PIPELINES-02-TC02 Tags: e2e, tasks, negative, non-admin diff --git a/specs/pipelines/git-resolvers.spec b/specs/pipelines/git-resolvers.spec index ae8342f0..cdab1cb1 100644 --- a/specs/pipelines/git-resolvers.spec +++ b/specs/pipelines/git-resolvers.spec @@ -11,15 +11,14 @@ Type: Functional Importance: High Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------------------| |1 |testdata/resolvers/pipelineruns/git-resolver-pipelinerun.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation | - |----|-----------------------------------|--------------------------------------| - |1 |git-resolver-pipelinerun |successful |no | + |S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |git-resolver-pipelinerun |successful | ## Test the functionality of git resolvers with authentication: PIPELINES-24-TC01 Tags: e2e @@ -29,12 +28,11 @@ Type: Functional Importance: High Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------------------| |1 |testdata/resolvers/pipelineruns/git-resolver-pipelinerun-private.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation | - |----|-----------------------------------|--------------------------------------| - |1 |git-resolver-pipelinerun-private |successful |no | \ No newline at end of file + |S.NO|pipeline_run_name |status | + 
|----|-----------------------------------|------------| + |1 |git-resolver-pipelinerun-private |successful | \ No newline at end of file diff --git a/specs/pipelines/http-resolvers.spec b/specs/pipelines/http-resolvers.spec new file mode 100644 index 00000000..ad2487e8 --- /dev/null +++ b/specs/pipelines/http-resolvers.spec @@ -0,0 +1,22 @@ +PIPELINES-31 +# http resolvers spec + +Pre condition: + * Validate Operator should be installed + +## Test the functionality of http resolvers: PIPELINES-31-TC01 +Tags: e2e, sanity +Component: Resolvers +Level: Integration +Type: Functional +Importance: High + +Steps: + * Create + |S.NO|resource_dir | + |----|-----------------------------------------------------------------| + |1 |testdata/resolvers/pipelineruns/http-resolver-pipelinerun.yaml | + * Verify pipelinerun + |S.NO|pipeline_run_name |status | + |----|-----------------------------------|------------| + |1 |http-resolver-pipelinerun |successful | \ No newline at end of file diff --git a/specs/pipelines/hub-resolvers.spec b/specs/pipelines/hub-resolvers.spec index 8580e0da..e6a10c97 100644 --- a/specs/pipelines/hub-resolvers.spec +++ b/specs/pipelines/hub-resolvers.spec @@ -9,7 +9,6 @@ Type: Functional Importance: High Steps: - * Verify ServiceAccount "pipeline" exist * Apply |S.NO|resource_dir | |----|-------------------------------------------------| @@ -17,6 +16,6 @@ Steps: |2 |testdata/pvc/pvc.yaml | |3 |testdata/resolvers/pipelineruns/git-cli-hub.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |hub-git-cli-run |successful|no | \ No newline at end of file + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |hub-git-cli-run |successful| \ No newline at end of file diff --git a/specs/pipelines/run.spec b/specs/pipelines/run.spec index c6b681fc..8f62c9dc 100644 --- a/specs/pipelines/run.spec +++ b/specs/pipelines/run.spec @@ -17,7 +17,6 @@ 
Run a sample pipeline that has 2 tasks: and verify that it runs succesfully Steps: - * Verify ServiceAccount "pipeline" exist * Verify that image stream "golang" exists * Create |S.NO|resource_dir | @@ -25,9 +24,9 @@ Steps: |1 |testdata/pvc/pvc.yaml | |2 |testdata/v1beta1/pipelinerun/pipelinerun.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |output-pipeline-run-v1b1|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |output-pipeline-run-v1b1|successful| ## Pipelinerun Timeout failure Test: PIPELINES-03-TC04 Tags: e2e, pipelines, non-admin, sanity @@ -37,15 +36,14 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------| |1 |testdata/v1beta1/pipelinerun/pipelineruntimeout.yaml | * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|-------------------|-----------------------| - |1 |pear |timeout |no | + |S.NO|pipeline_run_name|status | + |----|-----------------|-------------------| + |1 |pear |timeout | ## Configure execution results at the Task level Test: PIPELINES-03-TC05 Tags: e2e, integration, pipelines, non-admin, sanity @@ -55,15 +53,14 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------| |1 |testdata/v1beta1/pipelinerun/task_results_example.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------|----------|-----------------------| - |1 |task-level-results|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------|----------| + |1 |task-level-results|successful| ## Cancel pipelinerun Test: PIPELINES-03-TC06 Tags: e2e, 
integration, pipelines, non-admin, sanity @@ -73,16 +70,15 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|---------------------------------------------| |1 |testdata/pvc/pvc.yaml | |2 |testdata/v1beta1/pipelinerun/pipelinerun.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|---------|-----------------------| - |1 |output-pipeline-run-v1b1|cancelled|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|---------| + |1 |output-pipeline-run-v1b1|cancelled| ## Pipelinerun with pipelinespec and taskspec (embedded pipelinerun tests): PIPELINES-03-TC07 Tags: e2e, integration, pipelines, non-admin @@ -92,15 +88,14 @@ Type: Functional Importance: Critical Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|----------------------------------------------------------------------------| |1 |testdata/v1beta1/pipelinerun/pipelinerun-with-pipelinespec-and-taskspec.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-----------------------------------------|----------|-----------------------| - |1 |pipelinerun-with-pipelinespec-taskspec-vb|successful|no | + |S.NO|pipeline_run_name |status | + |----|-----------------------------------------|----------| + |1 |pipelinerun-with-pipelinespec-taskspec-vb|successful| ## Pipelinerun with large result: PIPELINES-03-TC08 Tags: e2e, integration, pipelines, non-admin, results, sanity @@ -111,12 +106,11 @@ Importance: Critical CustomerScenario: yes Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|---------------------------------------------------------------| |1 |testdata/v1beta1/pipelinerun/pipelinerun-with-large-result.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - 
|----|-----------------|----------|-----------------------| - |1 |result-test-run |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |result-test-run |successful| diff --git a/specs/results/results.spec b/specs/results/results.spec index a822e0d6..b56b3f95 100644 --- a/specs/results/results.spec +++ b/specs/results/results.spec @@ -13,7 +13,6 @@ Importance: Critical Steps: * Verify that image stream "golang" exists -* Verify ServiceAccount "pipeline" exist * Apply |S.NO|resource_dir | |----|-----------------------------| @@ -35,16 +34,15 @@ Importance: Critical Steps: * Verify that image stream "golang" exists -* Verify ServiceAccount "pipeline" exist * Apply |S.NO|resource_dir | |----|---------------------------------| |1 |testdata/results/pipeline.yaml | |2 |testdata/results/pipelinerun.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name|status |check_label_propagation| - |----|-----------------|----------|-----------------------| - |1 |pipeline-results |successful|no | + |S.NO|pipeline_run_name|status | + |----|-----------------|----------| + |1 |pipeline-results |successful| * Verify "pipelinerun" Results stored * Verify "pipelinerun" Results records * Verify "pipelinerun" Results logs \ No newline at end of file diff --git a/specs/triggers/eventlistener.spec b/specs/triggers/eventlistener.spec index a5a7dd4c..830ffc54 100644 --- a/specs/triggers/eventlistener.spec +++ b/specs/triggers/eventlistener.spec @@ -158,9 +158,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "true" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-------------------|----------|-----------------------| - |1 |simple-pipeline-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|-------------------|----------| + |1 |simple-pipeline-run|successful| * Cleanup Triggers ## Create Eventlistener 
embedded TriggersBindings specs: PIPELINES-05-TC08 @@ -185,9 +185,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-------------------|----------|-----------------------| - |1 |simple-pipeline-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|-------------------|----------| + |1 |simple-pipeline-run|successful| * Cleanup Triggers ## Create embedded TriggersTemplate: PIPELINES-05-TC09 @@ -211,9 +211,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-----------------------------------------|----------|-----------------------| - |1 |pipelinerun-with-taskspec-to-echo-message|successful|no | + |S.NO|pipeline_run_name |status | + |----|-----------------------------------------|----------| + |1 |pipelinerun-with-taskspec-to-echo-message|successful| * Cleanup Triggers ## Create Eventlistener with gitlab interceptor: PIPELINES-05-TC10 @@ -227,7 +227,6 @@ This scenario tests the creation of eventLister with gitlab interceptor, listens openshift-pipeline Resources defined under triggers-template, to deploy example app Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|--------------------------------------------------| @@ -237,9 +236,9 @@ Steps: * Mock post event to "gitlab" interceptor with event-type "Push Hook", payload "testdata/triggers/gitlab/gitlab-push-event.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------|----------|-----------------------| - |1 |gitlab-run |successful|no | + |S.NO|pipeline_run_name |status | + 
|----|------------------|----------| + |1 |gitlab-run |successful| * Cleanup Triggers ## Create Eventlistener with bitbucket interceptor: PIPELINES-05-TC11 @@ -253,7 +252,6 @@ This scenario tests the creation of eventLister with bitbucket interceptor, list openshift-pipeline Resources defined under triggers-template Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|--------------------------------------------------------------------| @@ -279,7 +277,6 @@ This scenario tests Github `push` event via CTB, on each event it creates/trigge openshift-pipeline Resources defined under triggers-template Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|------------------------------------------------------------------| @@ -290,9 +287,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/triggers/github-ctb/push.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |pipelinerun-git-push-ctb|successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |pipelinerun-git-push-ctb|successful| * Cleanup Triggers ## Verify Github pull_request event with Embbeded TriggerTemplate using Github-CTB: PIPELINES-05-TC13 @@ -306,7 +303,6 @@ This scenario tests Github `pull_request` event via CTB, on each event it create openshift-pipeline Resources defined under triggers-template Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|----------------------------------------------------------------| @@ -317,9 +313,9 @@ Steps: * Mock post event to "github" interceptor with event-type "pull_request", payload "testdata/triggers/github-ctb/pr.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status 
|check_label_propagation| - |----|------------------------|----------|-----------------------| - |1 |pipelinerun-git-pr-ctb |successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |pipelinerun-git-pr-ctb |successful| * Cleanup Triggers ## Verify Github pr_review event with Embbeded TriggerTemplate using Github-CTB: PIPELINES-05-TC14 @@ -333,7 +329,6 @@ This scenario tests Github `issue_comment` event via CTB, on each event it creat openshift-pipeline Resources defined under triggers-template Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------------------------| @@ -344,9 +339,9 @@ Steps: * Mock post event to "github" interceptor with event-type "issue_comment", payload "testdata/triggers/github-ctb/issue-comment.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|---------------------------------|----------|-----------------------| - |1 |pipelinerun-git-pr-review-ctb |successful|no | + |S.NO|pipeline_run_name |status | + |----|---------------------------------|----------| + |1 |pipelinerun-git-pr-review-ctb |successful| * Cleanup Triggers ## Create TriggersCRD resource with CEL interceptors (overlays): PIPELINES-05-TC15 @@ -360,7 +355,6 @@ This scenario tests the creation of Trigger resource which is combination of Tri openshift-pipeline Resources defined under triggers-template Steps: - * Verify ServiceAccount "pipeline" exist * Create |S.NO|resource_dir | |----|-----------------------------------------------------------| @@ -374,9 +368,9 @@ Steps: * Mock post event to "github" interceptor with event-type "pull_request", payload "testdata/triggers/triggersCRD/pull-request.json", with TLS "false" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - 
|----|------------------------|----------|-----------------------| - |1 |parallel-pipelinerun |successful|no | + |S.NO|pipeline_run_name |status | + |----|------------------------|----------| + |1 |parallel-pipelinerun |successful| * Cleanup Triggers ## Create multiple Eventlistener with TLS enabled: PIPELINES-05-TC16 @@ -403,9 +397,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "true" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-------------------|----------|-----------------------| - |1 |simple-pipeline-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|-------------------|----------| + |1 |simple-pipeline-run|successful| * Create |S.NO|resource_dir | |----|----------------------------------------------------------------------| @@ -415,9 +409,9 @@ Steps: * Mock post event to "github" interceptor with event-type "push", payload "testdata/push.json", with TLS "true" * Assert eventlistener response * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|---------------------|----------|-----------------------| - |1 |simple-pipeline-run-2|successful|no | + |S.NO|pipeline_run_name |status | + |----|---------------------|----------| + |1 |simple-pipeline-run-2|successful| * Cleanup Triggers ## Create Eventlistener with github interceptor And verify Kuberenetes Events: PIPELINES-05-TC17 @@ -443,7 +437,7 @@ Steps: * Assert eventlistener response * Verify kubernetes events for eventlistener * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|-------------------|----------|-----------------------| - |1 |simple-pipeline-run|successful|no | + |S.NO|pipeline_run_name |status | + |----|-------------------|----------| + |1 |simple-pipeline-run|successful| * Cleanup Triggers diff --git a/specs/triggers/tutorial.spec b/specs/triggers/tutorial.spec index 
67421ca0..e756d4ca 100644 --- a/specs/triggers/tutorial.spec +++ b/specs/triggers/tutorial.spec @@ -14,7 +14,6 @@ Importance: Critical This scenario tests the pipeline tutorials (https://github.com/openshift/pipelines-tutorial) pipelines related resources Steps: - * Verify ServiceAccount "pipeline" exist * Create remote |S.NO|resource_dir | |----|-----------------------------------------------------------------------------------------------------------------------------------------| @@ -23,21 +22,16 @@ Steps: |3 |https://raw.githubusercontent.com/openshift/pipelines-tutorial/{OSP_TUTORIAL_BRANCH}/01_pipeline/03_persistent_volume_claim.yaml | |4 |https://raw.githubusercontent.com/openshift/pipelines-tutorial/{OSP_TUTORIAL_BRANCH}/01_pipeline/04_pipeline.yaml | |5 |https://raw.githubusercontent.com/openshift/pipelines-tutorial/{OSP_TUTORIAL_BRANCH}/02_pipelinerun/01_build_deploy_api_pipelinerun.yaml | + |6 |https://raw.githubusercontent.com/openshift/pipelines-tutorial/{OSP_TUTORIAL_BRANCH}/02_pipelinerun/02_build_deploy_ui_pipelinerun.yaml| * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|----------------------------|----------|-----------------------| - |1 |build-deploy-api-pipelinerun|successful|no | - * Create remote - |S.NO|resource_dir | - |----|---------------------------------------------------------------------------------------------------------------------------------------| - |1 |https://raw.githubusercontent.com/openshift/pipelines-tutorial/{OSP_TUTORIAL_BRANCH}/02_pipelinerun/02_build_deploy_ui_pipelinerun.yaml| - * Verify pipelinerun - |S.NO|pipeline_run_name |status |check_label_propagation| - |----|---------------------------|----------|-----------------------| - |1 |build-deploy-ui-pipelinerun|successful|no | + |S.NO|pipeline_run_name |status | + |----|----------------------------|----------| + |1 |build-deploy-api-pipelinerun|successful| + |2 |build-deploy-ui-pipelinerun |successful| * Get route url of the 
route "pipelines-vote-ui" - * Wait for pipelines-vote-ui deployment - * Validate route url for pipelines tutorial + * Wait for "pipelines-vote-api" deployment + * Wait for "pipelines-vote-ui" deployment + * Validate that route URL contains "Cat 🐺 vs Dog 🐶" ## Run pipelines tutorial using triggers: PIPELINES-06-TC02 Tags: e2e, integration, triggers, non-admin, tutorial, sanity @@ -49,7 +43,6 @@ Importance: Critical This scenario tests the pipeline tutorials (https://github.com/openshift/pipelines-tutorial) triggers related resources Steps: - * Verify ServiceAccount "pipeline" exist * Create remote |S.NO|resource_dir | |----|-----------------------------------------------------------------------------------------------------------------------------------------| @@ -71,5 +64,5 @@ Steps: * "2" pipelinerun(s) should be present within "15" seconds * Verify the latest pipelinerun for "successful" state * Get route url of the route "pipelines-vote-ui" - * Wait for pipelines-vote-ui deployment - * Validate route url for pipelines tutorial + * Wait for "pipelines-vote-ui" deployment + * Validate that route URL contains "Cat 🐺 vs Dog 🐶" diff --git a/specs/versions.spec b/specs/versions.spec index 3eac16e9..cbe79661 100644 --- a/specs/versions.spec +++ b/specs/versions.spec @@ -29,3 +29,4 @@ Steps: * Check "tkn" client version * Check "tkn-pac" version * Check "opc" client version + * Check "opc" server version \ No newline at end of file diff --git a/steps/cli/oc.go b/steps/cli/oc.go index fcd0c0aa..ee875d60 100644 --- a/steps/cli/oc.go +++ b/steps/cli/oc.go @@ -13,6 +13,7 @@ import ( "github.com/getgauge-contrib/gauge-go/testsuit" "github.com/openshift-pipelines/release-tests/pkg/oc" "github.com/openshift-pipelines/release-tests/pkg/openshift" + "github.com/openshift-pipelines/release-tests/pkg/operator" "github.com/openshift-pipelines/release-tests/pkg/store" ) @@ -138,6 +139,15 @@ var _ = gauge.Step("Update addon config with clusterTasks as } }) +var _ = gauge.Step("Update 
addon config with resolverTasks as and pipelineTemplates as and expect message ", func(resolverTaskStatus, pipeTemplateStatus, expectedMessage string) { + patchData := fmt.Sprintf("{\"spec\":{\"addon\":{\"params\":[{\"name\":\"resolverTasks\",\"value\":\"%s\"},{\"name\":\"pipelineTemplates\",\"value\":\"%s\"}]}}}", resolverTaskStatus, pipeTemplateStatus) + if expectedMessage == "" { + oc.UpdateTektonConfig(patchData) + } else { + oc.UpdateTektonConfigwithInvalidData(patchData, expectedMessage) + } +}) + var _ = gauge.Step("Update addon config with resolverTasks as and expect message ", func(resolverTasksStatus, expectedMessage string) { patchData := fmt.Sprintf("{\"spec\":{\"addon\":{\"params\":[{\"name\":\"resolverTasks\",\"value\":\"%s\"}]}}}", resolverTasksStatus) if expectedMessage == "" { @@ -147,6 +157,23 @@ var _ = gauge.Step("Update addon config with resolverTasks as and expect message ", func(resolverStepActionsStatus, expectedMessage string) { + patchData := fmt.Sprintf("{\"spec\":{\"addon\":{\"params\":[{\"name\":\"resolverStepActions\",\"value\":\"%s\"}]}}}", resolverStepActionsStatus) + if expectedMessage == "" { + oc.UpdateTektonConfig(patchData) + } else { + oc.UpdateTektonConfigwithInvalidData(patchData, expectedMessage) + } +}) + +var _ = gauge.Step("Verify versioned ecosystem tasks", func() { + operator.VerifyVersionedTasks() +}) + +var _ = gauge.Step("Verify versioned ecosystem step actions", func() { + operator.VerifyVersionedStepActions() +}) + var _ = gauge.Step("Create project ", func(projectName string) { log.Printf("Check if project %v already exists", projectName) if oc.CheckProjectExists(projectName) { @@ -159,6 +186,16 @@ var _ = gauge.Step("Create project ", func(projectName string) { gauge.GetScenarioStore()["namespace"] = projectName }) +var _ = gauge.Step("Switch to autogenerated namespace", func() { + gauge_store := gauge.GetScenarioStore() + autogenerated_ns := gauge_store["autogenerated"].(string) + if 
oc.CheckProjectExists(autogenerated_ns) { + log.Printf("Switch to project %v", autogenerated_ns) + } + store.Clients().NewClientSet(autogenerated_ns) + gauge.GetScenarioStore()["namespace"] = autogenerated_ns +}) + var _ = gauge.Step("Delete project ", func(projectName string) { log.Printf("Deleting project %v", projectName) oc.DeleteProjectIgnoreErors(projectName) diff --git a/steps/cli/tkn.go b/steps/cli/tkn.go index a48538eb..63251f3b 100644 --- a/steps/cli/tkn.go +++ b/steps/cli/tkn.go @@ -32,8 +32,9 @@ var _ = gauge.Step("Start and verify pipeline with param with values go func(pipelineName string, params map[string]string, workspaces map[string]string) { defer wg.Done() - pipelineRunName := tkn.StartPipeline(pipelineName, params, workspaces, store.Namespace(), "--use-param-defaults") - pipelines.ValidatePipelineRun(store.Clients(), pipelineRunName, "successful", "no", store.Namespace()) + customPipelineRunName := pipelineName + "-run-" + value + pipelineRunName := tkn.StartPipeline(pipelineName, params, workspaces, store.Namespace(), "--use-param-defaults", "--prefix-name", customPipelineRunName) + pipelines.ValidatePipelineRun(store.Clients(), pipelineRunName, "successful", store.Namespace()) }(pipelineName, params, workspaces) time.Sleep(3 * time.Second) diff --git a/steps/hooks.go b/steps/hooks.go index 39cb1a88..729442da 100644 --- a/steps/hooks.go +++ b/steps/hooks.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "slices" "strconv" "strings" @@ -27,9 +28,20 @@ var _ = gauge.BeforeScenario(func(exInfo *gauge_messages.ExecutionInfo) { store := gauge.GetScenarioStore() store["crnames"] = crNames store["clients"] = cs + store["autogenerated"] = namespace store["namespace"] = namespace store["scenario.cleanup"] = cleanup store["targetNamespace"] = config.TargetNamespace + + // Skip pipelines SA check if scenario has @install tag + if slices.Contains(exInfo.CurrentScenario.Tags, "install") { + log.Printf("Skipping service account check as the scenario has 
@install tag") + } else { + sa := k8s.WaitForServiceAccount(cs, namespace, "pipeline") + if sa == nil { + testsuit.T.Fail(fmt.Errorf("service account 'pipeline' not available in namespace %s", namespace)) + } + } }, []string{}, testsuit.AND) // Runs After every Secenario @@ -76,7 +88,7 @@ var _ = gauge.BeforeSpec(func(exInfo *gauge_messages.ExecutionInfo) { } log.Print("Annotating the namespaces with 'operator.tekton.dev/prune.skip=true' so that the pipelineruns should not get deleted") for _, ns := range namespaces.Items { - if !(strings.HasPrefix(ns.Name, "openshift-") || strings.HasPrefix(ns.Name, "kube-") || ns.Name == "default") { + if !strings.HasPrefix(ns.Name, "openshift-") && !strings.HasPrefix(ns.Name, "kube-") && ns.Name != "default" { oc.AnnotateNamespaceIgnoreErrors(ns.Name, "operator.tekton.dev/prune.skip=true") } } diff --git a/steps/olm/operator.go b/steps/olm/operator.go index 89d36c09..1695c981 100644 --- a/steps/olm/operator.go +++ b/steps/olm/operator.go @@ -15,6 +15,7 @@ import ( "github.com/openshift-pipelines/release-tests/pkg/olm" "github.com/openshift-pipelines/release-tests/pkg/openshift" "github.com/openshift-pipelines/release-tests/pkg/operator" + "github.com/openshift-pipelines/release-tests/pkg/pipelines" "github.com/openshift-pipelines/release-tests/pkg/store" "github.com/openshift-pipelines/release-tests/pkg/tkn" ) @@ -135,6 +136,10 @@ var _ = gauge.Step("Check client version", func(binary string) { tkn.AssertClientVersion(binary) }) +var _ = gauge.Step("Check server version", func(binary string) { + tkn.AssertServerVersion(binary) +}) + var _ = gauge.Step("Check version", func(binary string) { tkn.AssertClientVersion(binary) }) @@ -160,7 +165,7 @@ var _ = gauge.Step("Create Results route", func() { }) var _ = gauge.Step("Verify Results stored", func(resourceType string) { - operator.VerifyResultsStored(resourceType) + operator.VerifyResultsAnnotationStored(resourceType) }) var _ = gauge.Step("Verify Results records", 
func(resourceType string) { @@ -187,3 +192,7 @@ var _ = gauge.Step("Create signing-secrets for Tekton Chains", func() { var _ = gauge.Step("Store Cosign public key in file", func() { operator.CreateFileWithCosignPubKey() }) + +var _ = gauge.Step("Verify version from the pipelinerun logs", func(binary string) { + pipelines.CheckLogVersion(store.Clients(), binary, store.Namespace()) +}) diff --git a/steps/pac/pac.go b/steps/pac/pac.go index 8037fc84..18839aac 100644 --- a/steps/pac/pac.go +++ b/steps/pac/pac.go @@ -21,6 +21,6 @@ var _ = gauge.Step("Configure GitLab repo and validate pipelinerun", func() { client := pac.InitGitLabClient() project := pac.SetupGitLabProject(client) pipelineName := pac.ConfigurePreviewChanges(client, project.ID) - pipelines.ValidatePipelineRun(store.Clients(), pipelineName, "successful", "no", store.Namespace()) + pipelines.ValidatePipelineRun(store.Clients(), pipelineName, "successful", store.Namespace()) pac.CleanupPAC(client, store.Clients(), project.ID, store.GetScenarioData("smee_deployment_name"), store.Namespace()) }) diff --git a/steps/pipeline/pipeline.go b/steps/pipeline/pipeline.go index 022c4194..c9f02de1 100644 --- a/steps/pipeline/pipeline.go +++ b/steps/pipeline/pipeline.go @@ -8,7 +8,6 @@ import ( "github.com/getgauge-contrib/gauge-go/gauge" m "github.com/getgauge-contrib/gauge-go/models" "github.com/getgauge-contrib/gauge-go/testsuit" - "github.com/openshift-pipelines/release-tests/pkg/config" "github.com/openshift-pipelines/release-tests/pkg/pipelines" "github.com/openshift-pipelines/release-tests/pkg/store" ) @@ -25,8 +24,7 @@ var _ = gauge.Step("Verify pipelinerun ", func(table *m.Table) { for _, row := range table.Rows { prname := row.Cells[1] status := row.Cells[2] - labelCheck := row.Cells[3] - pipelines.ValidatePipelineRun(store.Clients(), prname, status, labelCheck, store.Namespace()) + pipelines.ValidatePipelineRun(store.Clients(), prname, status, store.Namespace()) } }) @@ -50,23 +48,6 @@ var _ = gauge.Step(" 
taskrun(s) should be present within clustertasks are ", func(cts, status string) { - if cts == "community" { - cts = config.CommunityClustertasks - } - log.Printf("Checking if clustertasks %v is/are %v", cts, status) - ctsList := strings.Split(cts, ",") - if status == "present" { - for _, c := range ctsList { - pipelines.AssertClustertaskPresent(store.Clients(), c) - } - } else { - for _, c := range ctsList { - pipelines.AssertClustertaskNotPresent(store.Clients(), c) - } - } -}) - var _ = gauge.Step("Tasks are in namespace ", func(ts, status string, namespace string) { log.Printf("Checking if tasks %v is/are %v in namespace %v", ts, status, namespace) tsList := strings.Split(ts, ",") @@ -81,6 +62,20 @@ var _ = gauge.Step("Tasks are in namespace ", func(ts, } }) +var _ = gauge.Step("StepActions are in namespace ", func(stepActions, status string, namespace string) { + log.Printf("Checking if stepactions %v is/are %v in namespace %v", stepActions, status, namespace) + saList := strings.Split(stepActions, ",") + if status == "present" { + for _, c := range saList { + pipelines.AssertStepActionPresent(store.Clients(), namespace, c) + } + } else { + for _, c := range saList { + pipelines.AssertStepActionNotPresent(store.Clients(), namespace, c) + } + } +}) + var _ = gauge.Step("Assert pipelines are in namespace", func(status, namespace string) { if status == "present" { pipelines.AssertPipelinesPresent(store.Clients(), namespace) @@ -95,5 +90,5 @@ var _ = gauge.Step("Verify the latest pipelinerun for state", func(state if err != nil { testsuit.T.Fail(fmt.Errorf("failed to get pipelinerun from %s: %v", namespace, err)) } - pipelines.ValidatePipelineRun(store.Clients(), prname, state, "no", namespace) + pipelines.ValidatePipelineRun(store.Clients(), prname, state, namespace) }) diff --git a/steps/triggers/triggers.go b/steps/triggers/triggers.go index 2bc7a744..68eaaa8d 100644 --- a/steps/triggers/triggers.go +++ b/steps/triggers/triggers.go @@ -20,6 +20,10 @@ var _ = 
gauge.Step("Expose Event listener for TLS ", func(elname string) store.PutScenarioData("elname", elname) }) +var _ = gauge.Step("Expose Deployment config on port ", func(elname, port string) { + triggers.ExposeDeploymentConfig(store.Clients(), elname, port, store.Namespace()) +}) + var _ = gauge.Step("Mock post event with empty payload", func() { gauge.GetScenarioStore()["response"] = triggers.MockPostEventWithEmptyPayload(store.GetScenarioData("route")) }) diff --git a/steps/utility/utility.go b/steps/utility/utility.go index 754865db..54d0863c 100644 --- a/steps/utility/utility.go +++ b/steps/utility/utility.go @@ -42,16 +42,15 @@ var _ = gauge.Step("Switch to project ", func(projectName string) { gauge.GetScenarioStore()["namespace"] = projectName }) -var _ = gauge.Step("Validate route url for pipelines tutorial", func() { - expectedOutput := "Cat 🐺 vs Dog 🐶" +var _ = gauge.Step("Validate that route URL contains ", func(expectedOutput string) { routeUrl := store.GetScenarioData("routeurl") - output := cmd.MustSuccedIncreasedTimeout(30*time.Second, "lynx", routeUrl, "--dump").Stdout() - log.Println(output) + output := cmd.MustSuccedIncreasedTimeout(90*time.Second, "curl", "-kL", routeUrl).Stdout() if !strings.Contains(output, expectedOutput) { testsuit.T.Fail(fmt.Errorf("expected:\n%v,\ngot:\n%v", expectedOutput, output)) + log.Println(output) } }) -var _ = gauge.Step("Wait for pipelines-vote-ui deployment", func() { - k8s.ValidateDeployments(store.Clients(), store.Namespace(), "pipelines-vote-ui") +var _ = gauge.Step("Wait for deployment", func(deploymentName string) { + k8s.ValidateDeployments(store.Clients(), store.Namespace(), deploymentName) }) diff --git a/tc_spec_map.json b/tc_spec_map.json index 036fc7f0..e1bb1cd6 100644 --- a/tc_spec_map.json +++ b/tc_spec_map.json @@ -28,5 +28,8 @@ "PIPELINES-27": "specs/chains/chains.spec", "PIPELINES-28": "specs/operator/manual-approval-gate.spec", "PIPELINES-29": "specs/ecosystem/ecosystem.spec", - "PIPELINES-30": 
"specs/pac/pac-gitlab.spec" + "PIPELINES-30": "specs/pac/pac-gitlab.spec", + "PIPELINES-31": "specs/pipelines/http-resolvers.spec", + "PIPELINES-32": "specs/ecosystem/ecosystem-multiarch.spec", + "PIPELINES-33": "specs/ecosystem/ecosystem-s2i.spec" } diff --git a/testdata/v1/clustertask/configmaps/maven-settings.yaml b/testdata/ecosystem/configmaps/maven-settings.yaml similarity index 100% rename from testdata/v1/clustertask/configmaps/maven-settings.yaml rename to testdata/ecosystem/configmaps/maven-settings.yaml diff --git a/testdata/ecosystem/deploymentconfigs/nodejs-ex-git.yaml b/testdata/ecosystem/deploymentconfigs/nodejs-ex-git.yaml new file mode 100644 index 00000000..8a7b7f81 --- /dev/null +++ b/testdata/ecosystem/deploymentconfigs/nodejs-ex-git.yaml @@ -0,0 +1,31 @@ +kind: DeploymentConfig +apiVersion: apps.openshift.io/v1 +metadata: + name: nodejs-ex-git + labels: + app: nodejs-ex-git +spec: + strategy: + type: Rolling + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - nodejs-ex-git + from: + kind: ImageStreamTag + name: 'nodejs-ex-git:latest' + - type: ConfigChange + replicas: 1 + template: + metadata: + name: nodejs-ex-git + labels: + app: nodejs-ex-git + spec: + containers: + - name: nodejs-ex-git + ports: + - containerPort: 8080 + protocol: TCP \ No newline at end of file diff --git a/testdata/ecosystem/imagestreams/nodejs-ex-git.yaml b/testdata/ecosystem/imagestreams/nodejs-ex-git.yaml new file mode 100644 index 00000000..7858f467 --- /dev/null +++ b/testdata/ecosystem/imagestreams/nodejs-ex-git.yaml @@ -0,0 +1,4 @@ +apiVersion: image.openshift.io/v1 +kind: ImageStream +metadata: + name: nodejs-ex-git \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/jib-maven-pz.yaml b/testdata/ecosystem/pipelineruns/jib-maven-pz.yaml similarity index 100% rename from testdata/v1/clustertask/pipelineruns/jib-maven-pz.yaml rename to testdata/ecosystem/pipelineruns/jib-maven-pz.yaml diff --git 
a/testdata/v1/clustertask/pipelineruns/jib-maven.yaml b/testdata/ecosystem/pipelineruns/jib-maven.yaml similarity index 100% rename from testdata/v1/clustertask/pipelineruns/jib-maven.yaml rename to testdata/ecosystem/pipelineruns/jib-maven.yaml diff --git a/testdata/v1/clustertask/pipelineruns/kn-apply-multiarch.yaml b/testdata/ecosystem/pipelineruns/kn-apply-multiarch.yaml similarity index 63% rename from testdata/v1/clustertask/pipelineruns/kn-apply-multiarch.yaml rename to testdata/ecosystem/pipelineruns/kn-apply-multiarch.yaml index 99e43018..91b8184a 100644 --- a/testdata/v1/clustertask/pipelineruns/kn-apply-multiarch.yaml +++ b/testdata/ecosystem/pipelineruns/kn-apply-multiarch.yaml @@ -7,8 +7,14 @@ spec: tasks: - name: run-kn-apply taskRef: - name: kn-apply - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: kn-apply + - name: namespace + value: openshift-pipelines params: - name: SERVICE value: "hello-apply" diff --git a/testdata/v1/clustertask/pipelineruns/kn-apply.yaml b/testdata/ecosystem/pipelineruns/kn-apply.yaml similarity index 62% rename from testdata/v1/clustertask/pipelineruns/kn-apply.yaml rename to testdata/ecosystem/pipelineruns/kn-apply.yaml index 17ff52a3..8e6f2967 100644 --- a/testdata/v1/clustertask/pipelineruns/kn-apply.yaml +++ b/testdata/ecosystem/pipelineruns/kn-apply.yaml @@ -7,8 +7,14 @@ spec: tasks: - name: run-kn-apply taskRef: - name: kn-apply - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: kn-apply + - name: namespace + value: openshift-pipelines params: - name: SERVICE value: "hello-apply" diff --git a/testdata/v1/clustertask/pipelineruns/kn-pz.yaml b/testdata/ecosystem/pipelineruns/kn-pz.yaml similarity index 67% rename from testdata/v1/clustertask/pipelineruns/kn-pz.yaml rename to testdata/ecosystem/pipelineruns/kn-pz.yaml index d5d93ee0..18c4320b 100644 --- a/testdata/v1/clustertask/pipelineruns/kn-pz.yaml +++ 
b/testdata/ecosystem/pipelineruns/kn-pz.yaml @@ -7,8 +7,14 @@ spec: tasks: - name: run-kn taskRef: - name: kn - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: kn + - name: namespace + value: openshift-pipelines params: - name: ARGS value: diff --git a/testdata/ecosystem/pipelineruns/nodejs-ex-git.yaml b/testdata/ecosystem/pipelineruns/nodejs-ex-git.yaml new file mode 100644 index 00000000..6a9681c0 --- /dev/null +++ b/testdata/ecosystem/pipelineruns/nodejs-ex-git.yaml @@ -0,0 +1,16 @@ +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: nodejs-ex-git-pr +spec: + pipelineRef: + name: nodejs-ex-git + workspaces: + - name: workspace + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/testdata/ecosystem/pipelines/git-clone-read-private copy.yaml b/testdata/ecosystem/pipelines/git-clone-read-private copy.yaml deleted file mode 100644 index 4cd09fb9..00000000 --- a/testdata/ecosystem/pipelines/git-clone-read-private copy.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: git-clone-read-private-pipeline -spec: - workspaces: - - name: input - optional: true - - name: shared-workspace - tasks: - - name: git-clone - taskRef: - resolver: cluster - params: - - name: kind - value: task - - name: name - value: git-clone - - name: namespace - value: openshift-pipelines - workspaces: - - name: output - workspace: shared-workspace - params: - - name: URL - value: git@github.com:openshift-pipelines/test-private.git - results: - - name: COMMIT - value: $(tasks.git-clone.results.COMMIT) diff --git a/testdata/v1/clustertask/pipelines/jib-maven-pz.yaml b/testdata/ecosystem/pipelines/jib-maven-pz.yaml similarity index 70% rename from testdata/v1/clustertask/pipelines/jib-maven-pz.yaml rename to testdata/ecosystem/pipelines/jib-maven-pz.yaml index f068ceff..ce53678f 100644 --- 
a/testdata/v1/clustertask/pipelines/jib-maven-pz.yaml +++ b/testdata/ecosystem/pipelines/jib-maven-pz.yaml @@ -12,8 +12,14 @@ spec: tasks: - name: clone-git-repo taskRef: - name: git-clone - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: git-clone + - name: namespace + value: openshift-pipelines workspaces: - name: output workspace: source @@ -26,8 +32,14 @@ spec: value: "true" - name: build taskRef: - name: jib-maven - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: jib-maven + - name: namespace + value: openshift-pipelines runAfter: - clone-git-repo workspaces: diff --git a/testdata/v1/clustertask/pipelines/jib-maven.yaml b/testdata/ecosystem/pipelines/jib-maven.yaml similarity index 63% rename from testdata/v1/clustertask/pipelines/jib-maven.yaml rename to testdata/ecosystem/pipelines/jib-maven.yaml index 51c45ec9..a6b07a1a 100644 --- a/testdata/v1/clustertask/pipelines/jib-maven.yaml +++ b/testdata/ecosystem/pipelines/jib-maven.yaml @@ -12,22 +12,36 @@ spec: tasks: - name: clone-git-repo taskRef: - name: git-clone - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: git-clone + - name: namespace + value: openshift-pipelines workspaces: - name: output workspace: source params: - - name: url + - name: URL value: https://github.com/che-samples/console-java-simple - - name: subdirectory + - name: REVISION + value: master + - name: SUBDIRECTORY value: $(params.SUBDIR) - - name: deleteExisting + - name: DELETE_EXISTING value: "true" - name: build taskRef: - name: jib-maven - kind: ClusterTask + resolver: cluster + params: + - name: kind + value: task + - name: name + value: jib-maven + - name: namespace + value: openshift-pipelines runAfter: - clone-git-repo workspaces: diff --git a/testdata/v1beta1/pipelinerun/s2i-nodejs-pipelinerun.yaml b/testdata/ecosystem/pipelines/nodejs-ex-git.yaml similarity index 52% 
rename from testdata/v1beta1/pipelinerun/s2i-nodejs-pipelinerun.yaml rename to testdata/ecosystem/pipelines/nodejs-ex-git.yaml index 98008b57..63660110 100644 --- a/testdata/v1beta1/pipelinerun/s2i-nodejs-pipelinerun.yaml +++ b/testdata/ecosystem/pipelines/nodejs-ex-git.yaml @@ -1,40 +1,3 @@ -apiVersion: image.openshift.io/v1 -kind: ImageStream -metadata: - name: nodejs-ex-git ---- -kind: DeploymentConfig -apiVersion: apps.openshift.io/v1 -metadata: - name: nodejs-ex-git - labels: - app: nodejs-ex-git -spec: - strategy: - type: Rolling - triggers: - - type: ImageChange - imageChangeParams: - automatic: true - containerNames: - - nodejs-ex-git - from: - kind: ImageStreamTag - name: 'nodejs-ex-git:latest' - - type: ConfigChange - replicas: 1 - template: - metadata: - name: nodejs-ex-git - labels: - app: nodejs-ex-git - spec: - containers: - - name: nodejs-ex-git - ports: - - containerPort: 8080 - protocol: TCP ---- apiVersion: tekton.dev/v1beta1 kind: Pipeline metadata: @@ -56,7 +19,7 @@ spec: tasks: - name: fetch-repository params: - - name: url + - name: URL value: $(params.GIT_REPO) - name: revision value: $(params.GIT_REVISION) @@ -65,8 +28,14 @@ spec: - name: deleteExisting value: 'true' taskRef: - kind: ClusterTask - name: git-clone + resolver: cluster + params: + - name: kind + value: task + - name: name + value: git-clone + - name: namespace + value: openshift-pipelines workspaces: - name: output workspace: workspace @@ -83,8 +52,14 @@ spec: runAfter: - fetch-repository taskRef: - kind: ClusterTask - name: s2i-nodejs + resolver: cluster + params: + - name: kind + value: task + - name: name + value: s2i-nodejs + - name: namespace + value: openshift-pipelines workspaces: - name: source workspace: workspace @@ -95,24 +70,13 @@ spec: runAfter: - build taskRef: - kind: ClusterTask - name: openshift-client - workspaces: - - name: workspace ---- -apiVersion: tekton.dev/v1beta1 -kind: PipelineRun -metadata: - name: nodejs-ex-git-pr -spec: - pipelineRef: - name: 
nodejs-ex-git + resolver: cluster + params: + - name: kind + value: task + - name: name + value: openshift-client + - name: namespace + value: openshift-pipelines workspaces: - - name: workspace - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi + - name: workspace \ No newline at end of file diff --git a/testdata/ecosystem/pipelines/s2i-php.yaml b/testdata/ecosystem/pipelines/s2i-php.yaml index afffcedc..66f39707 100644 --- a/testdata/ecosystem/pipelines/s2i-php.yaml +++ b/testdata/ecosystem/pipelines/s2i-php.yaml @@ -25,6 +25,8 @@ spec: params: - name: URL value: https://github.com/sclorg/s2i-php-container + - name: REVISION + value: master - name: SUBDIRECTORY value: s2i-php-$(params.VERSION) - name: DELETE_EXISTING diff --git a/testdata/ecosystem/secrets/ssh-key.yaml b/testdata/ecosystem/secrets/ssh-key.yaml index 742ad78c..81c4a0b7 100644 --- a/testdata/ecosystem/secrets/ssh-key.yaml +++ b/testdata/ecosystem/secrets/ssh-key.yaml @@ -1,3 +1,4 @@ +# This private key is only associated with repository github.com/ppitonak/topsecret and has read-only access apiVersion: v1 kind: Secret type: kubernetes.io/ssh-auth diff --git a/testdata/manualapprovalgate/manual-approval-pipeline.yaml b/testdata/manualapprovalgate/manual-approval-pipeline.yaml index cb568008..72835e84 100644 --- a/testdata/manualapprovalgate/manual-approval-pipeline.yaml +++ b/testdata/manualapprovalgate/manual-approval-pipeline.yaml @@ -21,6 +21,7 @@ spec: value: - kube:admin - system:admin + - cluster-admin - tekton - name: numberOfApprovalsRequired value: 1 diff --git a/testdata/pruner/pipeline/pipeline-for-pruner.yaml b/testdata/pruner/pipeline/pipeline-for-pruner.yaml index 27c668a1..ba533b7a 100644 --- a/testdata/pruner/pipeline/pipeline-for-pruner.yaml +++ b/testdata/pruner/pipeline/pipeline-for-pruner.yaml @@ -7,7 +7,7 @@ spec: - name: message type: string steps: - - image: registry.redhat.io/ubi7/ubi-minimal + - image: 
registry.redhat.io/ubi8/ubi-minimal script: | echo $(params.message) --- diff --git a/testdata/pruner/task/task-for-pruner.yaml b/testdata/pruner/task/task-for-pruner.yaml index 90305093..d802e8b8 100644 --- a/testdata/pruner/task/task-for-pruner.yaml +++ b/testdata/pruner/task/task-for-pruner.yaml @@ -7,7 +7,7 @@ spec: - name: message type: string steps: - - image: registry.redhat.io/ubi7/ubi-minimal + - image: registry.redhat.io/ubi8/ubi-minimal script: | echo $(params.message) diff --git a/testdata/resolvers/pipelineruns/http-resolver-pipelinerun.yaml b/testdata/resolvers/pipelineruns/http-resolver-pipelinerun.yaml new file mode 100644 index 00000000..5dcab6fe --- /dev/null +++ b/testdata/resolvers/pipelineruns/http-resolver-pipelinerun.yaml @@ -0,0 +1,10 @@ +apiVersion: tekton.dev/v1 +kind: PipelineRun +metadata: + name: http-resolver-pipelinerun +spec: + pipelineRef: + resolver: http + params: + - name: url + value: "https://raw.githubusercontent.com/openshift-pipelines/release-tests/refs/heads/master/testdata/resolvers/pipelines/http-resolver-pipeline.yaml" \ No newline at end of file diff --git a/testdata/resolvers/pipelines/http-resolver-pipeline.yaml b/testdata/resolvers/pipelines/http-resolver-pipeline.yaml new file mode 100644 index 00000000..91a72602 --- /dev/null +++ b/testdata/resolvers/pipelines/http-resolver-pipeline.yaml @@ -0,0 +1,12 @@ +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: http-resolver-pipeline +spec: + tasks: + - name: "http-task" + taskRef: + resolver: http + params: + - name: url + value: "https://raw.githubusercontent.com/openshift-pipelines/tektoncd-catalog/refs/heads/p/tasks/task-tkn/0.2.2/task-tkn.yaml" \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/buildah-disconnected.yaml b/testdata/v1/clustertask/pipelineruns/buildah-disconnected.yaml deleted file mode 100644 index 0e4ed4de..00000000 --- a/testdata/v1/clustertask/pipelineruns/buildah-disconnected.yaml +++ /dev/null @@ -1,18 +0,0 
@@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: buildah-disconnected-run -spec: - pipelineRef: - name: buildah-pipeline - params: - - name: REVISION - value: fedora-38-dis - - name: SUBDIR - value: buildah-disconnected - timeouts: - pipeline: 10m - workspaces: - - name: source - persistentVolumeClaim: - claimName: shared-pvc \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/buildah.yaml b/testdata/v1/clustertask/pipelineruns/buildah.yaml deleted file mode 100644 index f62f71c8..00000000 --- a/testdata/v1/clustertask/pipelineruns/buildah.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: buildah-run -spec: - pipelineRef: - name: buildah-pipeline - timeouts: - pipeline: 10m - workspaces: - - name: source - persistentVolumeClaim: - claimName: shared-pvc \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/git-cli-read-private-sa.yaml b/testdata/v1/clustertask/pipelineruns/git-cli-read-private-sa.yaml deleted file mode 100644 index ba40cdf4..00000000 --- a/testdata/v1/clustertask/pipelineruns/git-cli-read-private-sa.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: git-cli-read-private-sa-run -spec: - taskRunSpecs: - - pipelineTaskName: git-cli - serviceAccountName: ssh-sa - pipelineRef: - name: git-cli-read-private-pipeline - timeouts: - pipeline: 5m - workspaces: - - name: shared-workspace - persistentVolumeClaim: - claimName: shared-pvc \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/git-cli-read-private.yaml b/testdata/v1/clustertask/pipelineruns/git-cli-read-private.yaml deleted file mode 100644 index 1811f6fa..00000000 --- a/testdata/v1/clustertask/pipelineruns/git-cli-read-private.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: git-cli-read-private-run -spec: - pipelineRef: - name: 
git-cli-read-private-pipeline - timeouts: - pipeline: 5m - workspaces: - - name: shared-workspace - persistentVolumeClaim: - claimName: shared-pvc \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/git-cli.yaml b/testdata/v1/clustertask/pipelineruns/git-cli.yaml deleted file mode 100644 index 3575e92e..00000000 --- a/testdata/v1/clustertask/pipelineruns/git-cli.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: git-cli-run -spec: - # serviceAccountName: git-service-account - pipelineRef: - name: git-cli-pipeline - timeouts: - pipeline: 5m - workspaces: - - name: shared-workspace - persistentVolumeClaim: - claimName: shared-pvc - - name: input - emptyDir: {} \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/git-clone-read-private-sa.yaml b/testdata/v1/clustertask/pipelineruns/git-clone-read-private-sa.yaml deleted file mode 100644 index e762511e1..00000000 --- a/testdata/v1/clustertask/pipelineruns/git-clone-read-private-sa.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: git-clone-read-private-pipeline-sa-run -spec: - taskRunSpecs: - - pipelineTaskName: git-clone - serviceAccountName: ssh-sa - pipelineRef: - name: git-clone-read-private-pipeline - timeouts: - pipeline: 5m - workspaces: - - name: shared-workspace - emptyDir: {} diff --git a/testdata/v1/clustertask/pipelineruns/git-clone-read-private.yaml b/testdata/v1/clustertask/pipelineruns/git-clone-read-private.yaml deleted file mode 100644 index 68ae5bb8..00000000 --- a/testdata/v1/clustertask/pipelineruns/git-clone-read-private.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: git-clone-read-private-pipeline-run -spec: - pipelineRef: - name: git-clone-read-private-pipeline - timeouts: - pipeline: 5m - workspaces: - - name: shared-workspace - emptyDir: {} diff --git 
a/testdata/v1/clustertask/pipelineruns/kn.yaml b/testdata/v1/clustertask/pipelineruns/kn.yaml deleted file mode 100644 index 63b5e8b8..00000000 --- a/testdata/v1/clustertask/pipelineruns/kn.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: kn-run -spec: - pipelineSpec: - tasks: - - name: run-kn - taskRef: - name: kn - kind: ClusterTask - params: - - name: ARGS - value: - - "service" - - "create" - - "hello" - - "--force" - - "--image=gcr.io/knative-samples/helloworld-go:latest" - - "-eTARGET=Pipeline" - timeouts: - pipeline: 10m diff --git a/testdata/v1/clustertask/pipelineruns/maven.yaml b/testdata/v1/clustertask/pipelineruns/maven.yaml deleted file mode 100644 index 2870590e..00000000 --- a/testdata/v1/clustertask/pipelineruns/maven.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: maven-run -spec: - pipelineRef: - name: maven-pipeline - timeouts: - pipeline: 15m - workspaces: - - name: source - persistentVolumeClaim: - claimName: shared-pvc - - name: maven-settings - configMap: - name: maven-settings \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/openshift-client.yaml b/testdata/v1/clustertask/pipelineruns/openshift-client.yaml deleted file mode 100644 index 99dd8087..00000000 --- a/testdata/v1/clustertask/pipelineruns/openshift-client.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: openshift-client-run -spec: - pipelineSpec: - tasks: - - name: run-oc - taskRef: - name: openshift-client - kind: ClusterTask - params: - - name: SCRIPT - value: | - oc get pipeline -n openshift - timeouts: - pipeline: 5m \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/skopeo-copy.yaml b/testdata/v1/clustertask/pipelineruns/skopeo-copy.yaml deleted file mode 100644 index ffd5dbbe..00000000 --- a/testdata/v1/clustertask/pipelineruns/skopeo-copy.yaml +++ /dev/null @@ -1,28 
+0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: skopeo-copy-run -spec: - pipelineSpec: - tasks: - - name: run-skopeo-copy - taskRef: - name: skopeo-copy - kind: ClusterTask - params: - - name: srcImageURL - value: "docker://image-registry.openshift-image-registry.svc:5000/openshift/golang" - - name: destImageURL - value: "docker://image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/golang:skopeo" - - name: destTLSverify - value: "true" - workspaces: - - name: images-url - workspace: images-url - workspaces: - - name: images-url - timeouts: - pipeline: 5m - workspaces: - - name: images-url - emptyDir: {} diff --git a/testdata/v1/clustertask/pipelineruns/tkn-pac.yaml b/testdata/v1/clustertask/pipelineruns/tkn-pac.yaml deleted file mode 100644 index 813e085b..00000000 --- a/testdata/v1/clustertask/pipelineruns/tkn-pac.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: tkn-pac-run -spec: - pipelineSpec: - tasks: - - name: run-tkn - taskRef: - name: tkn - kind: ClusterTask - params: - - name: ARGS - value: - - "pac" - - "version" - timeouts: - pipeline: 5m \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/tkn-version.yaml b/testdata/v1/clustertask/pipelineruns/tkn-version.yaml deleted file mode 100644 index 2b4d43e8..00000000 --- a/testdata/v1/clustertask/pipelineruns/tkn-version.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: PipelineRun -metadata: - name: tkn-version-run -spec: - pipelineSpec: - tasks: - - name: run-tkn - taskRef: - name: tkn - kind: ClusterTask - params: - - name: ARGS - value: - - "version" - timeouts: - pipeline: 5m \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelineruns/tkn.yaml b/testdata/v1/clustertask/pipelineruns/tkn.yaml deleted file mode 100644 index 2c37feb6..00000000 --- a/testdata/v1/clustertask/pipelineruns/tkn.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: 
tekton.dev/v1 -kind: PipelineRun -metadata: - name: tkn-run -spec: - pipelineSpec: - tasks: - - name: run-tkn - taskRef: - name: tkn - kind: ClusterTask - params: - - name: ARGS - value: - - "pipeline" - - "list" - - "-n" - - "openshift" - timeouts: - pipeline: 2m \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelines/buildah.yaml b/testdata/v1/clustertask/pipelines/buildah.yaml deleted file mode 100644 index 6794a9fc..00000000 --- a/testdata/v1/clustertask/pipelines/buildah.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: buildah-pipeline -spec: - params: - - name: REVISION - default: fedora-38 - - name: SUBDIR - description: where to clone the git repo - default: buildah - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/ppitonak/nocode - - name: subdirectory - value: $(params.SUBDIR) - - name: deleteExisting - value: "true" - - name: revision - value: $(params.REVISION) - - name: run-buildah - taskRef: - name: buildah - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/$(params.SUBDIR) - - name: CONTEXT - value: $(params.SUBDIR) - - name: TLSVERIFY - value: "true" diff --git a/testdata/v1/clustertask/pipelines/git-cli-read-private.yaml b/testdata/v1/clustertask/pipelines/git-cli-read-private.yaml deleted file mode 100644 index ce667218..00000000 --- a/testdata/v1/clustertask/pipelines/git-cli-read-private.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: git-cli-read-private-pipeline -spec: - workspaces: - - name: input - optional: true - - name: shared-workspace - tasks: - - name: git-cli - taskRef: - name: git-cli - 
kind: ClusterTask - workspaces: - - name: input - workspace: input - - name: source - workspace: shared-workspace - subPath: $(context.pipelineRun.name) - params: - - name: GIT_USER_NAME - value: git_username - - name: GIT_USER_EMAIL - value: git_email - - name: GIT_SCRIPT - value: | - git_dir=$(context.pipelineRun.name) - git clone --config core.sshCommand="ssh -oStrictHostKeyChecking=no" git@github.com:openshift-pipelines/test-private.git ${git_dir} - cd ${git_dir} - cat README.md - results: - - name: commit - value: $(tasks.git-cli.results.commit) \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelines/git-cli.yaml b/testdata/v1/clustertask/pipelines/git-cli.yaml deleted file mode 100644 index 380decfd..00000000 --- a/testdata/v1/clustertask/pipelines/git-cli.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: git-cli-pipeline -spec: - workspaces: - - name: shared-workspace - - name: input - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: shared-workspace - params: - - name: url - value: https://github.com/kelseyhightower/nocode - - name: subdirectory - value: "git-cli" - - name: deleteExisting - value: "true" - - name: git-cli - taskRef: - name: git-cli - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: shared-workspace - - name: input - workspace: input - params: - - name: GIT_USER_NAME - value: git_username - - name: GIT_USER_EMAIL - value: git_email - - name: GIT_SCRIPT - value: | - cd git-cli - git config --global safe.directory "*" - echo "Hello" > hello - git add . 
- git status - git commit -m "Add sample file" - git log --oneline -5 - results: - - name: commit - value: $(tasks.git-cli.results.commit) \ No newline at end of file diff --git a/testdata/v1/clustertask/pipelines/git-clone-read-private copy.yaml b/testdata/v1/clustertask/pipelines/git-clone-read-private copy.yaml deleted file mode 100644 index 999f2a4e..00000000 --- a/testdata/v1/clustertask/pipelines/git-clone-read-private copy.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: git-clone-read-private-pipeline -spec: - workspaces: - - name: input - optional: true - - name: shared-workspace - tasks: - - name: git-clone - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: shared-workspace - params: - - name: url - value: git@github.com:openshift-pipelines/test-private.git - results: - - name: commit - value: $(tasks.git-clone.results.commit) diff --git a/testdata/v1/clustertask/pipelines/git-clone-read-private.yaml b/testdata/v1/clustertask/pipelines/git-clone-read-private.yaml deleted file mode 100644 index 999f2a4e..00000000 --- a/testdata/v1/clustertask/pipelines/git-clone-read-private.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: git-clone-read-private-pipeline -spec: - workspaces: - - name: input - optional: true - - name: shared-workspace - tasks: - - name: git-clone - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: shared-workspace - params: - - name: url - value: git@github.com:openshift-pipelines/test-private.git - results: - - name: commit - value: $(tasks.git-clone.results.commit) diff --git a/testdata/v1/clustertask/pipelines/maven.yaml b/testdata/v1/clustertask/pipelines/maven.yaml deleted file mode 100644 index d71ae972..00000000 --- a/testdata/v1/clustertask/pipelines/maven.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: tekton.dev/v1 -kind: Pipeline -metadata: - name: 
maven-pipeline -spec: - workspaces: - - name: source - - name: maven-settings - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/jboss-openshift/openshift-quickstarts.git - - name: deleteExisting - value: "true" - - name: subdirectory - value: $(context.pipelineRun.name) - - name: build - taskRef: - name: maven - kind: ClusterTask - params: - - name: CONTEXT_DIR - value: $(context.pipelineRun.name)/undertow-servlet - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - - name: maven-settings - workspace: maven-settings \ No newline at end of file diff --git a/testdata/v1/clustertask/rolebindings/ssh-sa-scc.yaml b/testdata/v1/clustertask/rolebindings/ssh-sa-scc.yaml deleted file mode 100644 index d062044a..00000000 --- a/testdata/v1/clustertask/rolebindings/ssh-sa-scc.yaml +++ /dev/null @@ -1,11 +0,0 @@ -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: ssh-sa-scc-rolebinding -subjects: - - kind: ServiceAccount - name: ssh-sa -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pipelines-scc-clusterrole \ No newline at end of file diff --git a/testdata/v1/clustertask/secrets/ssh-key.yaml b/testdata/v1/clustertask/secrets/ssh-key.yaml deleted file mode 100644 index 81c4a0b7..00000000 --- a/testdata/v1/clustertask/secrets/ssh-key.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# This private key is only associated with repository github.com/ppitonak/topsecret and has read-only access -apiVersion: v1 -kind: Secret -type: kubernetes.io/ssh-auth -metadata: - name: ssh-key - annotations: - tekton.dev/git-0: github.com -data: - ssh-privatekey: 
LS0tLS1CRUdJTiBPUEVOU1NIIFBSSVZBVEUgS0VZLS0tLS0KYjNCbGJuTnphQzFyWlhrdGRqRUFBQUFBQkc1dmJtVUFBQUFFYm05dVpRQUFBQUFBQUFBQkFBQUFhQUFBQUJObFkyUnpZUwoxemFHRXlMVzVwYzNSd01qVTJBQUFBQ0c1cGMzUndNalUyQUFBQVFRU2p3RHN5aWFGdmdsNEtMbDY1NklBNThGZDk2YkRyCmJQZW8rNVMxSldoVzY0TytnZmxxS0hTMENzMHZKSmF1NEhZaS91M0pOY3RwWEMrY25JSzJCTGxaQUFBQXFQeFNrdmo4VXAKTDRBQUFBRTJWalpITmhMWE5vWVRJdGJtbHpkSEF5TlRZQUFBQUlibWx6ZEhBeU5UWUFBQUJCQktQQU96S0pvVytDWGdvdQpYcm5vZ0Rud1YzM3BzT3RzOTZqN2xMVWxhRmJyZzc2QitXb29kTFFLelM4a2xxN2dkaUwrN2NrMXkybGNMNXljZ3JZRXVWCmtBQUFBaEFOUXdUUGZHN1M2M0s1NGw5Uk1ZelNsdDI4Y2hjaldrY20rSjNSRXJ6VllTQUFBQURYTmhkbWwwWVVCbVpXUnYKY21FQkFnPT0KLS0tLS1FTkQgT1BFTlNTSCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/testdata/v1/clustertask/serviceaccount/ssh-sa.yaml b/testdata/v1/clustertask/serviceaccount/ssh-sa.yaml deleted file mode 100644 index 7decf79b..00000000 --- a/testdata/v1/clustertask/serviceaccount/ssh-sa.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ssh-sa \ No newline at end of file diff --git a/testdata/v1beta1/clustertask/pipelineruns/git-clone-read-private.yaml b/testdata/v1beta1/clustertask/pipelineruns/git-clone-read-private.yaml deleted file mode 100644 index 61c19516..00000000 --- a/testdata/v1beta1/clustertask/pipelineruns/git-clone-read-private.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: PipelineRun -metadata: - name: git-clone-read-private-pipeline-run -spec: - pipelineRef: - name: git-clone-read-private-pipeline - timeout: 5m - workspaces: - - name: shared-workspace - emptyDir: {} diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-dotnet.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-dotnet.yaml deleted file mode 100644 index 3e41caf8..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-dotnet.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-dotnet-pipeline -spec: - params: - - name: EXAMPLE_REVISION - description: git branch of 
.NET example - default: dotnetcore-3.1 - - name: VERSION - description: The tag of 'dotnet' imagestream for .NET version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/redhat-developer/s2i-dotnetcore-ex - - name: subdirectory - value: s2i-dotnet-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: revision - value: $(params.EXAMPLE_REVISION) - - name: build - taskRef: - name: s2i-dotnet - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-dotnet-$(params.VERSION) - - name: PATH_CONTEXT - value: "s2i-dotnet-$(params.VERSION)/app" - - name: TLSVERIFY - value: "true" - - name: VERSION - value: $(params.VERSION) - retries: 1 diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-golang.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-golang.yaml deleted file mode 100644 index 290aad0b..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-golang.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-go-pipeline -spec: - params: - - name: VERSION - description: The tag of 'golang' imagestream for Go version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/chmouel/go-simple-uploader.git - - name: revision - value: e8e751f5e189ee493a43948b667f665f8d7e1057 - - name: subdirectory - value: s2i-go-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: build - taskRef: - name: s2i-go - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - 
params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-go-$(params.VERSION) - - name: PATH_CONTEXT - value: s2i-go-$(params.VERSION) - - name: TLSVERIFY - value: "true" - - name: VERSION - value: $(params.VERSION) - retries: 1 diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-java.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-java.yaml deleted file mode 100644 index 9f2d4e1b..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-java.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-java-pipeline -spec: - params: - - name: VERSION - description: The tag of 'java' imagestream for Java version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/jboss-openshift/openshift-quickstarts.git - - name: subdirectory - value: s2i-java-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: revision - value: master - - name: build - taskRef: - name: s2i-java - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-java-$(params.VERSION) - - name: PATH_CONTEXT - value: s2i-java-$(params.VERSION)/undertow-servlet - - name: TLSVERIFY - value: "true" - - name: VERSION - value: $(params.VERSION) - retries: 1 diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-perl.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-perl.yaml deleted file mode 100644 index d1a1df95..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-perl.yaml +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-perl-pipeline -spec: - params: - - name: VERSION - description: The tag of 
'perl' imagestream for Perl version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/therealshabi/hello-world-1 - - name: subdirectory - value: s2i-perl-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: build - taskRef: - name: s2i-perl - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-perl-$(params.VERSION) - - name: PATH_CONTEXT - value: s2i-perl-$(params.VERSION) - - name: TLSVERIFY - value: "true" - - name: VERSION - value: $(params.VERSION) - retries: 1 diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-php.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-php.yaml deleted file mode 100644 index 914fe7e7..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-php.yaml +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-php-pipeline -spec: - params: - - name: VERSION - description: The tag of 'php' imagestream for PHP version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/sclorg/s2i-php-container - - name: subdirectory - value: s2i-php-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: build - taskRef: - name: s2i-php - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-php-$(params.VERSION) - - name: PATH_CONTEXT - value: "s2i-php-$(params.VERSION)/test/test-app" - - name: TLSVERIFY - value: "true" 
- - name: VERSION - value: $(params.VERSION) - retries: 1 diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-python.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-python.yaml deleted file mode 100644 index bb6a6f82..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-python.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-python-pipeline -spec: - params: - - name: VERSION - description: The tag of 'python' imagestream for Python version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: source - params: - - name: url - value: https://github.com/openshift-pipelines/release-tests - - name: revision - value: master - - name: subdirectory - value: s2i-python-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: build - taskRef: - name: s2i-python - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-python-$(params.VERSION) - - name: PATH_CONTEXT - value: s2i-python-$(params.VERSION)/testdata/s2i_python - - name: TLSVERIFY - value: "true" - - name: VERSION - value: $(params.VERSION) - retries: 1 \ No newline at end of file diff --git a/testdata/v1beta1/clustertask/pipelines/s2i-ruby.yaml b/testdata/v1beta1/clustertask/pipelines/s2i-ruby.yaml deleted file mode 100644 index 4b53aace..00000000 --- a/testdata/v1beta1/clustertask/pipelines/s2i-ruby.yaml +++ /dev/null @@ -1,44 +0,0 @@ -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: s2i-ruby-pipeline -spec: - params: - - name: VERSION - description: The tag of 'ruby' imagestream for Ruby version - workspaces: - - name: source - tasks: - - name: clone-git-repo - taskRef: - name: git-clone - kind: ClusterTask - workspaces: - - name: output - workspace: 
source - params: - - name: url - value: https://github.com/sclorg/ruby-ex - - name: subdirectory - value: s2i-ruby-$(params.VERSION) - - name: deleteExisting - value: "true" - - name: build - taskRef: - name: s2i-ruby - kind: ClusterTask - runAfter: - - clone-git-repo - workspaces: - - name: source - workspace: source - params: - - name: IMAGE - value: image-registry.openshift-image-registry.svc:5000/$(context.pipelineRun.namespace)/s2i-ruby-$(params.VERSION) - - name: PATH_CONTEXT - value: s2i-ruby-$(params.VERSION) - - name: TLSVERIFY - value: "true" - - name: VERSION - value: $(params.VERSION) - retries: 1