feat(workflow_engine): Only link workflows to the IssueStream #248020
Workflow file for this run
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: backend

on:
  push:
    branches:
      - master
  pull_request:
    types: [opened, synchronize, reopened, labeled]
  workflow_dispatch:

# Cancel in progress workflows on pull_requests.
# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

# hack for https://github.com/actions/cache/issues/810#issuecomment-1222550359
env:
  SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3
  SNUBA_NO_WORKERS: 1
  SENTRY_SKIP_SELENIUM_PLUGIN: '1'
jobs:
  # Detects which areas of the repo changed so downstream jobs can be skipped.
  files-changed:
    name: detect what files changed
    runs-on: ubuntu-24.04
    timeout-minutes: 3
    # Map a step output to a job output
    outputs:
      api_docs: ${{ steps.changes.outputs.api_docs }}
      backend: ${{ steps.changes.outputs.backend_all_without_acceptance }}
      backend_dependencies: ${{ steps.changes.outputs.backend_dependencies }}
      backend_api_urls: ${{ steps.changes.outputs.backend_api_urls }}
      backend_any_type: ${{ steps.changes.outputs.backend_any_type }}
      migration_lockfile: ${{ steps.changes.outputs.migration_lockfile }}
      # When true, skip selective testing and run the full backend suite
      skip_selective_testing: "${{ github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'Trigger: Override Selective Testing') }}"
    steps:
      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      - name: Check for backend file changes
        uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0
        id: changes
        with:
          token: ${{ github.token }}
          filters: .github/file-filters.yml
| api-docs: | |
| if: needs.files-changed.outputs.api_docs == 'true' | |
| needs: files-changed | |
| name: api docs test | |
| runs-on: ubuntu-24.04 | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4 | |
| id: setup-node | |
| with: | |
| node-version-file: '.node-version' | |
| - uses: pnpm/action-setup@9b5745cdf0a2e8c2620f0746130f809adb911c19 # v4 | |
| - name: Setup sentry python env | |
| uses: ./.github/actions/setup-sentry | |
| id: setup | |
| with: | |
| mode: default | |
| - name: Run API docs tests | |
| run: | | |
| make test-api-docs | |
| - name: Inspect failure | |
| if: failure() | |
| run: | | |
| if command -v devservices; then | |
| devservices logs | |
| fi | |
| # Selective testing - only on PRs, determine which tests to run based on coverage data. | |
| # This job is skipped on push-to-master where the full suite runs instead. | |
| select-tests: | |
| if: >- | |
| needs.files-changed.outputs.backend == 'true' && | |
| needs.files-changed.outputs.skip_selective_testing != 'true' && | |
| github.event_name == 'pull_request' && | |
| github.event.pull_request.head.repo.full_name == github.repository | |
| needs: files-changed | |
| name: select tests | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 10 | |
| permissions: | |
| contents: read | |
| id-token: write | |
| outputs: | |
| has-selected-tests: ${{ steps.compute-tests.outputs.has-selected-tests }} | |
| test-count: ${{ steps.compute-tests.outputs.test-count }} | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Get changed files | |
| id: changed | |
| env: | |
| GH_TOKEN: ${{ github.token }} | |
| run: | | |
| gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files \ | |
| --paginate | python3 .github/workflows/scripts/parse-pr-files.py >> "$GITHUB_OUTPUT" | |
| - name: Setup Python | |
| uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 | |
| with: | |
| python-version-file: '.python-version' | |
| - name: Authenticate to Google Cloud | |
| uses: google-github-actions/auth@c200f3691d83b41bf9bbd8638997a462592937ed # v2.1.3 | |
| with: | |
| project_id: sentry-dev-tooling | |
| workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }} | |
| service_account: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }} | |
| - name: Download coverage database | |
| id: download-coverage | |
| run: | | |
| mkdir -p .artifacts/coverage | |
| GCS_PATH="gs://getsentry-coverage-data/latest/.coverage.combined" | |
| echo "Fetching coverage DB from: $GCS_PATH" | |
| gcloud storage ls -l "$GCS_PATH" 2>/dev/null || true | |
| if ! gcloud storage cp "$GCS_PATH" \ | |
| .artifacts/coverage/.coverage.combined 2>/dev/null; then | |
| echo "Warning: Failed to download coverage from GCS, will run full test suite" | |
| echo "coverage-file=" >> "$GITHUB_OUTPUT" | |
| else | |
| ls -lh .artifacts/coverage/.coverage.combined | |
| echo "coverage-file=.artifacts/coverage/.coverage.combined" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Compute selected tests | |
| id: compute-tests | |
| if: steps.download-coverage.outputs.coverage-file != '' | |
| env: | |
| COVERAGE_DB: ${{ steps.download-coverage.outputs.coverage-file }} | |
| CHANGED_FILES: ${{ steps.changed.outputs.files }} | |
| PREVIOUS_FILENAMES: ${{ steps.changed.outputs.previous-filenames }} | |
| run: | | |
| python3 .github/workflows/scripts/compute-sentry-selected-tests.py \ | |
| --coverage-db "$COVERAGE_DB" \ | |
| --changed-files "$CHANGED_FILES" \ | |
| --previous-filenames "$PREVIOUS_FILENAMES" \ | |
| --output .artifacts/selected-tests.txt \ | |
| --github-output | |
| - name: Upload selected tests artifact | |
| if: steps.compute-tests.outputs.has-selected-tests == 'true' | |
| uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 | |
| with: | |
| name: selected-tests-${{ github.run_id }} | |
| path: .artifacts/selected-tests.txt | |
| retention-days: 1 | |
| calculate-shards: | |
| # Use always() so this job runs even when select-tests is skipped (master) | |
| if: >- | |
| always() && | |
| !cancelled() && | |
| needs.files-changed.outputs.backend == 'true' | |
| needs: [files-changed, select-tests] | |
| name: calculate test shards | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 5 | |
| outputs: | |
| shard-count: ${{ steps.static-shards.outputs.shard-count || steps.calculate-shards.outputs.shard-count }} | |
| shard-indices: ${{ steps.static-shards.outputs.shard-indices || steps.calculate-shards.outputs.shard-indices }} | |
| steps: | |
| - name: Use default shards (no selective testing) | |
| id: static-shards | |
| if: needs.select-tests.outputs.has-selected-tests != 'true' | |
| # Keep in sync with MAX_SHARDS in .github/workflows/scripts/calculate-backend-test-shards.py | |
| run: | | |
| echo "shard-count=22" >> "$GITHUB_OUTPUT" | |
| echo "shard-indices=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21]" >> "$GITHUB_OUTPUT" | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| if: needs.select-tests.outputs.has-selected-tests == 'true' | |
| - name: Download selected tests artifact | |
| if: needs.select-tests.outputs.has-selected-tests == 'true' | |
| uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 | |
| with: | |
| name: selected-tests-${{ github.run_id }} | |
| path: .artifacts/ | |
| - name: Calculate test shards | |
| id: calculate-shards | |
| if: needs.select-tests.outputs.has-selected-tests == 'true' | |
| env: | |
| SELECTED_TESTS_FILE: '.artifacts/selected-tests.txt' | |
| SELECTED_TEST_COUNT: ${{ needs.select-tests.outputs.test-count || '' }} | |
| run: | | |
| python3 .github/workflows/scripts/calculate-backend-test-shards.py | |
| backend-test: | |
| # Use always() so this job runs even when select-tests is skipped (master) | |
| if: >- | |
| always() && | |
| !cancelled() && | |
| needs.files-changed.outputs.backend == 'true' && needs.calculate-shards.outputs.shard-count != '0' | |
| needs: [files-changed, select-tests, calculate-shards] | |
| name: backend test | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 60 | |
| permissions: | |
| contents: read | |
| id-token: write | |
| actions: read # used for DIM metadata and job URL lookup | |
| pull-requests: write # used to post failure comments | |
| strategy: | |
| # This helps not having to run multiple jobs because one fails, thus, reducing resource usage | |
| # and reducing the risk that one of many runs would turn red again (read: intermittent tests) | |
| fail-fast: false | |
| matrix: | |
| # Dynamic matrix from calculate-shards | |
| instance: ${{ fromJSON(needs.calculate-shards.outputs.shard-indices) }} | |
| env: | |
| # Dynamic total from calculate-shards | |
| MATRIX_INSTANCE_TOTAL: ${{ needs.calculate-shards.outputs.shard-count }} | |
| TEST_GROUP_STRATEGY: roundrobin | |
| PYTHONHASHSEED: '0' | |
| XDIST_PER_WORKER_SNUBA: '1' | |
| XDIST_WORKERS: ${{ github.event_name == 'pull_request' && '2' || '3' }} | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Setup sentry env | |
| uses: ./.github/actions/setup-sentry | |
| id: setup | |
| with: | |
| mode: backend-ci | |
| - name: Download odiff binary | |
| run: | | |
| curl -sL https://registry.npmjs.org/odiff-bin/-/odiff-bin-4.3.2.tgz \ | |
| | tar -xz --strip-components=2 package/raw_binaries/odiff-linux-x64 | |
| sudo install -m 755 odiff-linux-x64 /usr/local/bin/odiff | |
| rm odiff-linux-x64 | |
| - name: Bootstrap per-worker Snuba instances | |
| if: env.XDIST_PER_WORKER_SNUBA == '1' | |
| run: | | |
| set -eo pipefail | |
| SNUBA_IMAGE=$(docker inspect snuba-snuba-1 --format '{{.Config.Image}}') | |
| SNUBA_NETWORK=$(docker inspect snuba-snuba-1 --format '{{range $k, $v := .NetworkSettings.Networks}}{{$k}}{{end}}') | |
| if [ -z "$SNUBA_IMAGE" ] || [ -z "$SNUBA_NETWORK" ]; then | |
| echo "ERROR: Could not inspect snuba-snuba-1 container. Is devservices running?" | |
| exit 1 | |
| fi | |
| docker stop snuba-snuba-1 || true | |
| PIDS=() | |
| for i in $(seq 0 $(( ${XDIST_WORKERS} - 1 ))); do | |
| ( | |
| WORKER_DB="default_gw${i}" | |
| WORKER_PORT=$((1230 + i)) | |
| curl -sf 'http://localhost:8123/' --data-binary "CREATE DATABASE IF NOT EXISTS ${WORKER_DB}" | |
| docker run --rm --network "$SNUBA_NETWORK" \ | |
| -e "CLICKHOUSE_DATABASE=${WORKER_DB}" -e "CLICKHOUSE_HOST=clickhouse" \ | |
| -e "CLICKHOUSE_PORT=9000" -e "CLICKHOUSE_HTTP_PORT=8123" \ | |
| -e "DEFAULT_BROKERS=kafka:9093" -e "REDIS_HOST=redis" \ | |
| -e "REDIS_PORT=6379" -e "REDIS_DB=1" -e "SNUBA_SETTINGS=docker" \ | |
| "$SNUBA_IMAGE" bootstrap --force 2>&1 | tail -3 | |
| docker run -d --name "snuba-gw${i}" --network "$SNUBA_NETWORK" \ | |
| -p "${WORKER_PORT}:1218" \ | |
| -e "CLICKHOUSE_DATABASE=${WORKER_DB}" -e "CLICKHOUSE_HOST=clickhouse" \ | |
| -e "CLICKHOUSE_PORT=9000" -e "CLICKHOUSE_HTTP_PORT=8123" \ | |
| -e "DEFAULT_BROKERS=kafka:9093" -e "REDIS_HOST=redis" \ | |
| -e "REDIS_PORT=6379" -e "REDIS_DB=1" -e "SNUBA_SETTINGS=docker" \ | |
| -e "DEBUG=1" "$SNUBA_IMAGE" api | |
| for attempt in $(seq 1 30); do | |
| if curl -sf "http://127.0.0.1:${WORKER_PORT}/health" > /dev/null 2>&1; then | |
| echo "snuba-gw${i} healthy on port ${WORKER_PORT}" | |
| break | |
| fi | |
| if [ "$attempt" -eq 30 ]; then | |
| echo "ERROR: snuba-gw${i} failed health check after 30 attempts" | |
| docker logs "snuba-gw${i}" 2>&1 | tail -20 || true | |
| exit 1 | |
| fi | |
| sleep 2 | |
| done | |
| ) & | |
| PIDS+=($!) | |
| done | |
| for pid in "${PIDS[@]}"; do | |
| wait "$pid" || { echo "ERROR: Snuba bootstrap subshell (PID $pid) failed"; exit 1; } | |
| done | |
| - name: Download selected tests artifact | |
| if: needs.select-tests.outputs.has-selected-tests == 'true' | |
| uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 | |
| with: | |
| name: selected-tests-${{ github.run_id }} | |
| path: .artifacts/ | |
| - name: Run backend test (${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }}) | |
| env: | |
| SELECTED_TESTS_FILE: ${{ needs.select-tests.outputs.has-selected-tests == 'true' && '.artifacts/selected-tests.txt' || '' }} | |
| run: | | |
| if [ -n "${XDIST_WORKERS}" ]; then | |
| export PYTEST_ADDOPTS="$PYTEST_ADDOPTS -n ${XDIST_WORKERS} --dist=loadfile" | |
| timeout 1200 make test-python-ci || { | |
| rc=$? | |
| if [ "$rc" -eq 124 ]; then | |
| echo "::error::Test run timed out after 20 minutes (possible xdist hang)" | |
| fi | |
| exit "$rc" | |
| } | |
| else | |
| make test-python-ci | |
| fi | |
| - name: Report failures | |
| if: ${{ !cancelled() && github.event_name == 'pull_request' }} | |
| continue-on-error: true | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 | |
| env: | |
| PYTEST_JSON_PATH: ${{ github.workspace }}/.artifacts/pytest.json | |
| PYTEST_ARTIFACT_DIR: pytest-results-backend-${{ github.run_id }}-${{ matrix.instance }} | |
| with: | |
| script: | | |
| const { reportShard } = await import(`${process.env.GITHUB_WORKSPACE}/.github/workflows/scripts/report-backend-test-failures.js`); | |
| await reportShard({ github, context, core }); | |
| - name: Inspect failure | |
| if: failure() | |
| run: | | |
| if command -v devservices; then | |
| devservices logs | |
| fi | |
| if [ "${XDIST_PER_WORKER_SNUBA}" = "1" ]; then | |
| for i in $(seq 0 $(( ${XDIST_WORKERS} - 1 ))); do | |
| echo "--- snuba-gw${i} logs ---" | |
| docker logs "snuba-gw${i}" 2>&1 | tail -30 || true | |
| done | |
| fi | |
| - name: Collect test data | |
| uses: ./.github/actions/collect-test-data | |
| if: ${{ !cancelled() }} | |
| with: | |
| artifact_path: .artifacts/pytest.json | |
| gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }} | |
| gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }} | |
| workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }} | |
| service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }} | |
| matrix_instance_number: ${{ steps.setup.outputs.matrix-instance-number }} | |
| backend-migration-tests: | |
| if: needs.files-changed.outputs.backend == 'true' | |
| needs: files-changed | |
| name: backend migration tests | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 30 | |
| permissions: | |
| contents: read | |
| actions: read | |
| pull-requests: write | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Setup sentry env | |
| uses: ./.github/actions/setup-sentry | |
| id: setup | |
| with: | |
| mode: default | |
| - name: run tests | |
| run: | | |
| PYTEST_ADDOPTS="$PYTEST_ADDOPTS -m migrations --migrations --reruns 0 --fail-slow=120s" make test-python-ci | |
| - name: Report failures | |
| if: ${{ !cancelled() && github.event_name == 'pull_request' }} | |
| continue-on-error: true | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 | |
| env: | |
| PYTEST_JSON_PATH: ${{ github.workspace }}/.artifacts/pytest.json | |
| PYTEST_ARTIFACT_DIR: pytest-results-migration-${{ github.run_id }} | |
| with: | |
| script: | | |
| const { reportShard } = await import(`${process.env.GITHUB_WORKSPACE}/.github/workflows/scripts/report-backend-test-failures.js`); | |
| await reportShard({ github, context, core }); | |
| - name: Inspect failure | |
| if: failure() | |
| run: | | |
| if command -v devservices; then | |
| devservices logs | |
| fi | |
| cli: | |
| if: needs.files-changed.outputs.backend == 'true' | |
| needs: files-changed | |
| name: cli test | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 10 | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Setup sentry env | |
| uses: ./.github/actions/setup-sentry | |
| id: setup | |
| with: | |
| mode: migrations | |
| - name: Run test | |
| env: | |
| # This short circuits the create_default_projects post_upgrade hook | |
| # which spawns taskworkers which will spin for 5 minutes on kafka | |
| # not being up. We don't need the default project here as this is not local dev. | |
| SENTRY_NO_CREATE_DEFAULT_PROJECT: 1 | |
| run: make test-cli | |
| - name: Inspect failure | |
| if: failure() | |
| run: | | |
| if command -v devservices; then | |
| devservices logs | |
| fi | |
| requirements: | |
| if: needs.files-changed.outputs.backend_dependencies == 'true' | |
| needs: files-changed | |
| name: requirements check | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 3 | |
| steps: | |
| - uses: getsentry/action-github-app-token@5c1e90706fe007857338ac1bfbd7a4177db2f789 # v4.0.0 | |
| id: token | |
| continue-on-error: true | |
| with: | |
| app_id: ${{ vars.SENTRY_INTERNAL_APP_ID }} | |
| private_key: ${{ secrets.SENTRY_INTERNAL_APP_PRIVATE_KEY }} | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - uses: astral-sh/setup-uv@884ad927a57e558e7a70b92f2bccf9198a4be546 # v6 | |
| with: | |
| version: '0.9.28' | |
| - uses: getsentry/action-setup-venv@5a80476d175edf56cb205b08bc58986fa99d1725 # v3.2.0 | |
| with: | |
| cache-dependency-path: uv.lock | |
| install-cmd: echo | |
| - name: check requirements | |
| run: | | |
| make freeze-requirements | |
| if ! git diff --exit-code; then | |
| echo $'\n\nrun `make freeze-requirements` locally to update requirements' | |
| exit 1 | |
| fi | |
| - name: apply any requirements changes | |
| if: steps.token.outcome == 'success' && github.ref != 'refs/heads/master' && always() | |
| uses: getsentry/action-github-commit@31f6706ca1a7b9ad6d22c1b07bf3a92eabb05632 # v2.0.0 | |
| with: | |
| github-token: ${{ steps.token.outputs.token }} | |
| message: ':snowflake: re-freeze requirements' | |
| api-url-typescript: | |
| if: needs.files-changed.outputs.backend_api_urls == 'true' | |
| needs: files-changed | |
| name: api url typescript generation | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 10 | |
| steps: | |
| - uses: getsentry/action-github-app-token@5c1e90706fe007857338ac1bfbd7a4177db2f789 # v4.0.0 | |
| id: token | |
| with: | |
| app_id: ${{ vars.SENTRY_INTERNAL_APP_ID }} | |
| private_key: ${{ secrets.SENTRY_INTERNAL_APP_PRIVATE_KEY }} | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Setup sentry env | |
| uses: ./.github/actions/setup-sentry | |
| with: | |
| mode: backend-ci | |
| - name: Sync API Urls to TypeScript | |
| run: | | |
| python3 -m tools.api_urls_to_typescript | |
| - name: Apply any file changes | |
| if: github.ref != 'refs/heads/master' && always() | |
| uses: getsentry/action-github-commit@31f6706ca1a7b9ad6d22c1b07bf3a92eabb05632 # v2.0.0 | |
| with: | |
| github-token: ${{ steps.token.outputs.token }} | |
| message: ':hammer_and_wrench: Sync API Urls to TypeScript' | |
| migration: | |
| if: needs.files-changed.outputs.migration_lockfile == 'true' | |
| needs: files-changed | |
| name: check migration | |
| runs-on: ubuntu-24.04 | |
| steps: | |
| - name: Checkout sentry | |
| uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Setup sentry env | |
| uses: ./.github/actions/setup-sentry | |
| id: setup | |
| with: | |
| mode: migrations | |
| - name: Migration & lockfile checks | |
| env: | |
| SENTRY_LOG_LEVEL: ERROR | |
| PGPASSWORD: postgres | |
| run: | | |
| ./.github/workflows/scripts/migration-check.sh | |
| - name: Inspect failure | |
| if: failure() | |
| run: | | |
| if command -v devservices; then | |
| devservices logs | |
| fi | |
| monolith-dbs: | |
| if: needs.files-changed.outputs.backend == 'true' | |
| needs: files-changed | |
| name: monolith-dbs test | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 20 | |
| permissions: | |
| contents: read | |
| id-token: write | |
| actions: read | |
| pull-requests: write | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - name: Setup sentry env | |
| uses: ./.github/actions/setup-sentry | |
| id: setup | |
| with: | |
| mode: migrations | |
| - name: Run test | |
| run: | | |
| make test-monolith-dbs | |
| - name: Report failures | |
| if: ${{ !cancelled() && github.event_name == 'pull_request' }} | |
| continue-on-error: true | |
| uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 | |
| env: | |
| PYTEST_JSON_PATH: ${{ github.workspace }}/.artifacts/pytest.monolith-dbs.json | |
| PYTEST_ARTIFACT_DIR: pytest-results-monolith-dbs-${{ github.run_id }} | |
| with: | |
| script: | | |
| const { reportShard } = await import(`${process.env.GITHUB_WORKSPACE}/.github/workflows/scripts/report-backend-test-failures.js`); | |
| await reportShard({ github, context, core }); | |
| - name: Inspect failure | |
| if: failure() | |
| run: | | |
| if command -v devservices; then | |
| devservices logs | |
| fi | |
| - name: Collect test data | |
| uses: ./.github/actions/collect-test-data | |
| if: ${{ !cancelled() }} | |
| with: | |
| artifact_path: .artifacts/pytest.monolith-dbs.json | |
| gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }} | |
| gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }} | |
| workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }} | |
| service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }} | |
| typing: | |
| if: needs.files-changed.outputs.backend == 'true' | |
| needs: files-changed | |
| name: backend typing | |
| runs-on: ubuntu-24.04 | |
| timeout-minutes: 20 | |
| steps: | |
| - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 | |
| - uses: astral-sh/setup-uv@884ad927a57e558e7a70b92f2bccf9198a4be546 # v6 | |
| with: | |
| version: '0.9.28' | |
| # we just cache the venv-dir directly in action-setup-venv | |
| enable-cache: false | |
| - uses: getsentry/action-setup-venv@5a80476d175edf56cb205b08bc58986fa99d1725 # v3.2.0 | |
| with: | |
| cache-dependency-path: uv.lock | |
| install-cmd: uv sync --frozen --active | |
| - name: setup sentry (lite) | |
| run: | | |
| python3 -m tools.fast_editable --path . | |
| sentry init | |
| - run: PYTHONWARNINGS=error::RuntimeWarning mypy | |
| id: run | |
| - uses: getsentry/action-github-app-token@5c1e90706fe007857338ac1bfbd7a4177db2f789 # v4.0.0 | |
| id: token | |
| continue-on-error: true | |
| with: | |
| app_id: ${{ vars.SENTRY_INTERNAL_APP_ID }} | |
| private_key: ${{ secrets.SENTRY_INTERNAL_APP_PRIVATE_KEY }} | |
| # only if `mypy` succeeds should we try and trim the blocklist | |
| - run: python3 -m tools.mypy_helpers.make_module_ignores | |
| id: regen-blocklist | |
| - run: git diff --exit-code | |
| - run: | | |
| # mypy does not have granular codes so don't allow specific messages to regress | |
| set -euo pipefail | |
| ! grep "'Settings' object has no attribute" .artifacts/mypy-all | |
| ! grep 'Argument .* of "dispatch" is incompatible with' .artifacts/mypy-all | |
| ! grep 'Cannot override class variable' .artifacts/mypy-all | |
| ! grep 'Exception type must be derived from BaseException' .artifacts/mypy-all | |
| ! grep 'Incompatible default for argument' .artifacts/mypy-all | |
| ! grep 'Incompatible return value type (got "HttpResponseBase"' .artifacts/mypy-all | |
| ! grep 'Incompatible types in "yield"' .artifacts/mypy-all | |
| ! grep 'Module "sentry.*has no attribute' .artifacts/mypy-all | |
| ! grep 'No return value expected' .artifacts/mypy-all | |
| ! grep 'Return value expected' .artifacts/mypy-all | |
| ! grep 'Unpacking a string is disallowed' .artifacts/mypy-all | |
| ! grep 'base class .* defined the type as.*Permission' .artifacts/mypy-all | |
| ! grep 'does not explicitly export attribute' .artifacts/mypy-all | |
| ! grep 'gets multiple values for' .artifacts/mypy-all | |
| - name: apply blocklist changes | |
| if: | | |
| steps.token.outcome == 'success' && | |
| steps.run.outcome == 'success' && | |
| steps.regen-blocklist.outcome == 'success' && | |
| github.ref != 'refs/heads/master' && | |
| always() | |
| uses: getsentry/action-github-commit@31f6706ca1a7b9ad6d22c1b07bf3a92eabb05632 # v2.0.0 | |
| with: | |
| github-token: ${{ steps.token.outputs.token }} | |
| message: ':knife: regenerate mypy module blocklist' | |
| # This check runs once all dependent jobs have passed | |
| # It symbolizes that all required Backend checks have succesfully passed (Or skipped) | |
| # This step is the only required backend check | |
| backend-required-check: | |
| needs: | |
| [ | |
| api-docs, | |
| backend-test, | |
| backend-migration-tests, | |
| calculate-shards, | |
| cli, | |
| files-changed, | |
| requirements, | |
| migration, | |
| monolith-dbs, | |
| typing, | |
| ] | |
| name: Backend | |
| # This is necessary since a failed/skipped dependent job would cause this job to be skipped | |
| if: always() | |
| runs-on: ubuntu-24.04 | |
| steps: | |
| # If any jobs we depend on fail, we will fail since this is a required check | |
| # NOTE: A timeout is considered a failure | |
| - name: Check for failures | |
| if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') | |
| run: | | |
| echo "One of the dependent jobs have failed. You may need to re-run it." && exit 1 |