From 989ee142e3ece0fa8d903c18444fc1941df9669c Mon Sep 17 00:00:00 2001 From: Claude Date: Tue, 23 Dec 2025 03:45:13 +0000 Subject: [PATCH] Refactor workflows: extract shared cgroup script, add framework integrations - Extract cgroup_workers.sh shared script for DRY CPU detection logic - Removes ~420 lines of duplicated code across 8 workflows - Single source of truth for cgroup-aware worker calculation - Refactor test-py-unittest.yml to simple runner pattern - Reduces from 1471 lines to 535 lines - Now compatible with run-branch-test.yml framework - Comparison logic delegated to regression-test.yml - Refactor test-storybook.yml to test-js-storybook.yml - Simple runner pattern matching other test workflows - Removes composite comparison logic (handled by run-branch-test.yml) - Add NUnit, xUnit, unittest, and Storybook to run-branch-test.yml - Full source/target/compare job chains for each framework - Framework auto-detection for .NET and Storybook projects - Aggregated regression counts in notifications - Fix missing xfailed_count output in .NET workflows Note: test-py-bandit.yml intentionally kept as security scanner (different purpose than test runners) --- .github/scripts/cgroup_workers.sh | 99 ++ .github/workflows/run-branch-test.yml | 406 +++++++- .github/workflows/test-cpp-gtest.yml | 76 +- .github/workflows/test-cs-nunit.yml | 81 +- .github/workflows/test-cs-xunit.yml | 81 +- .github/workflows/test-js-jest.yml | 76 +- .github/workflows/test-js-mocha.yml | 76 +- .github/workflows/test-js-storybook.yml | 280 +++++ .github/workflows/test-py-pytest.yml | 76 +- .github/workflows/test-py-unittest.yml | 1255 +++-------------------- .github/workflows/test-rs-cargo.yml | 76 +- .github/workflows/test-storybook.yml | 650 ------------ 12 files changed, 974 insertions(+), 2258 deletions(-) create mode 100644 .github/scripts/cgroup_workers.sh create mode 100644 .github/workflows/test-js-storybook.yml delete mode 100644 .github/workflows/test-storybook.yml diff --git a/.github/scripts/cgroup_workers.sh b/.github/scripts/cgroup_workers.sh new file mode 100644 index 0000000..603f7d0 --- /dev/null +++ b/.github/scripts/cgroup_workers.sh @@ -0,0 +1,99 @@ +#!/bin/bash +# Shared script for determining parallel worker count based on cgroup CPU limits +# Usage: source this script, then call: WORKERS=$(determine_workers "$PARALLEL_WORKERS_INPUT" "$RUNS_ON_INPUT") +# +# Arguments: +# $1 - parallel_workers input value (empty string, "auto", or a number) +# $2 - runs_on input value (JSON array string like '["self-hosted", "multithreaded"]') +# +# Returns: Number of workers to use (prints to stdout) + +cgroup_auto_workers() { + local n="" + + # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " + if [ -f /sys/fs/cgroup/cpu.max ]; then + local quota period + quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" + period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" + if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + + # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us + if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + local quota period + quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" + period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" + if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + + # cpuset fallback (v2: 
/sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) + if [ -z "$n" ]; then + local f="" + if [ -f /sys/fs/cgroup/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset.cpus" + elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset/cpuset.cpus" + fi + + if [ -n "$f" ]; then + local spec + spec="$(cat "$f" | tr -d '[:space:]')" + if [ -n "$spec" ]; then + local count=0 + IFS=',' read -r -a parts <<< "$spec" + for p in "${parts[@]}"; do + if [[ "$p" == *-* ]]; then + local a="${p%%-*}" + local b="${p##*-}" + if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then + count=$((count + b - a + 1)) + fi + elif [[ "$p" =~ ^[0-9]+$ ]]; then + count=$((count + 1)) + fi + done + if [ "$count" -gt 0 ]; then + n="$count" + fi + fi + fi + fi + + if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then + n="1" + fi + + echo "$n" +} + +determine_workers() { + local parallel_workers_input="$1" + local runs_on_input="$2" + local workers="" + + if [ -z "$parallel_workers_input" ]; then + # Default based on runner type + if echo "$runs_on_input" | grep -q "multithreaded"; then + workers="6" + else + workers="1" + fi + elif [ "$parallel_workers_input" = "auto" ]; then + workers="$(cgroup_auto_workers)" + else + # Use the provided number directly + workers="$parallel_workers_input" + fi + + echo "$workers" +} + +# If script is executed directly (not sourced), run determine_workers with args +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + determine_workers "$1" "$2" +fi diff --git a/.github/workflows/run-branch-test.yml b/.github/workflows/run-branch-test.yml index 736be89..f076b21 100644 --- a/.github/workflows/run-branch-test.yml +++ b/.github/workflows/run-branch-test.yml @@ -25,6 +25,12 @@ on: required: false type: string default: "stable" + # .NET options + dotnet-version: + description: ".NET SDK version for NUnit/xUnit tests." + required: false + type: string + default: "8.0.x" # C++/CMake options cmake-version: description: "CMake version for C++ tests." @@ -99,6 +105,33 @@ on: required: false type: string default: "." + # Python unittest options + unittest-start-directory: + description: "Directory passed to unittest discovery." + required: false + type: string + default: "." + unittest-test-pattern: + description: "Pattern used by unittest discovery." + required: false + type: string + default: "test*.py" + # Storybook options + storybook-port: + description: "Port for Storybook server." + required: false + type: string + default: "6006" + storybook-start-command: + description: "Command to start Storybook server." + required: false + type: string + default: "npm run storybook" + storybook-test-command: + description: "Command to run Storybook tests." 
+ required: false + type: string + default: "npm run storybook-test" secrets: DISCORD_WEBHOOK_URL: required: false @@ -123,6 +156,30 @@ on: cpp_regression_count: description: "Number of regressions (C++)" value: ${{ jobs.cpp-compare.outputs.regression_count }} + nunit_has_regressions: + description: "Whether regressions were detected (NUnit)" + value: ${{ jobs.nunit-compare.outputs.has_regressions }} + nunit_regression_count: + description: "Number of regressions (NUnit)" + value: ${{ jobs.nunit-compare.outputs.regression_count }} + xunit_has_regressions: + description: "Whether regressions were detected (xUnit)" + value: ${{ jobs.xunit-compare.outputs.has_regressions }} + xunit_regression_count: + description: "Number of regressions (xUnit)" + value: ${{ jobs.xunit-compare.outputs.regression_count }} + unittest_has_regressions: + description: "Whether regressions were detected (unittest)" + value: ${{ jobs.unittest-compare.outputs.has_regressions }} + unittest_regression_count: + description: "Number of regressions (unittest)" + value: ${{ jobs.unittest-compare.outputs.regression_count }} + storybook_has_regressions: + description: "Whether regressions were detected (Storybook)" + value: ${{ jobs.storybook-compare.outputs.has_regressions }} + storybook_regression_count: + description: "Number of regressions (Storybook)" + value: ${{ jobs.storybook-compare.outputs.regression_count }} jobs: # Detect which test frameworks are present @@ -134,6 +191,10 @@ jobs: has_mocha: ${{ steps.detect.outputs.has_mocha }} has_cargo: ${{ steps.detect.outputs.has_cargo }} has_cpp: ${{ steps.detect.outputs.has_cpp }} + has_nunit: ${{ steps.detect.outputs.has_nunit }} + has_xunit: ${{ steps.detect.outputs.has_xunit }} + has_unittest: ${{ steps.detect.outputs.has_unittest }} + has_storybook: ${{ steps.detect.outputs.has_storybook }} steps: - uses: actions/checkout@v4.2.2 - name: Detect test frameworks @@ -216,6 +277,67 @@ jobs: echo "✅ Detected: Mocha" fi + # Detect NUnit (.NET) + HAS_NUNIT="false" + if find . -name "*.csproj" 2>/dev/null | head -1 | grep -q .; then + if grep -rq "NUnit" *.csproj 2>/dev/null || grep -rq "nunit" . --include="*.csproj" 2>/dev/null; then + HAS_NUNIT="true" + fi + fi + echo "has_nunit=$HAS_NUNIT" >> "$GITHUB_OUTPUT" + if [ "$HAS_NUNIT" = "true" ]; then + echo "✅ Detected: NUnit (.NET)" + fi + + # Detect xUnit (.NET) + HAS_XUNIT="false" + if find . -name "*.csproj" 2>/dev/null | head -1 | grep -q .; then + if grep -rq "xunit" . --include="*.csproj" 2>/dev/null; then + HAS_XUNIT="true" + fi + fi + echo "has_xunit=$HAS_XUNIT" >> "$GITHUB_OUTPUT" + if [ "$HAS_XUNIT" = "true" ]; then + echo "✅ Detected: xUnit (.NET)" + fi + + # Detect Python unittest (when pytest is not present) + HAS_UNITTEST="false" + if [ -f "pyproject.toml" ] || [ -f "setup.py" ] || find . -name "test_*.py" 2>/dev/null | head -1 | grep -q .; then + # Only enable unittest if pytest is NOT detected (pytest is preferred) + if ! grep -q "pytest" requirements.txt 2>/dev/null && ! grep -q "pytest" pyproject.toml 2>/dev/null; then + # Check for unittest imports + if grep -rq "import unittest" . --include="*.py" 2>/dev/null || grep -rq "from unittest" . 
--include="*.py" 2>/dev/null; then + HAS_UNITTEST="true" + fi + fi + fi + echo "has_unittest=$HAS_UNITTEST" >> "$GITHUB_OUTPUT" + if [ "$HAS_UNITTEST" = "true" ]; then + echo "✅ Detected: Python unittest" + fi + + # Detect Storybook + HAS_STORYBOOK="false" + if [ -f "package.json" ]; then + if grep -q '"@storybook' package.json 2>/dev/null; then + # Check for storybook test runner + if grep -q '"@storybook/test-runner"' package.json 2>/dev/null || grep -q '"storybook-test"' package.json 2>/dev/null; then + HAS_STORYBOOK="true" + fi + fi + # Check for .storybook directory + if [ -d ".storybook" ]; then + if [ -f "package.json" ] && grep -q '"@storybook/test-runner"' package.json 2>/dev/null; then + HAS_STORYBOOK="true" + fi + fi + fi + echo "has_storybook=$HAS_STORYBOOK" >> "$GITHUB_OUTPUT" + if [ "$HAS_STORYBOOK" = "true" ]; then + echo "✅ Detected: Storybook" + fi + # ==================== PYTEST ==================== # Test source branch (always fresh, no caching) pytest-source: @@ -488,9 +610,205 @@ jobs: current_no_tests_found: ${{ needs.cpp-source.outputs.no_tests_found }} artifact_name: regression_cpp_${{ github.event.pull_request.number || github.run_id }} + # ==================== NUNIT (.NET) ==================== + nunit-source: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_nunit == 'true' + uses: ./.github/workflows/test-cs-nunit.yml + with: + ref: "" + dotnet-version: ${{ inputs.dotnet-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: nunit_source_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + + nunit-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_nunit == 'true' + uses: ./.github/workflows/test-cs-nunit.yml + with: + ref: ${{ inputs.target_branch }} + dotnet-version: ${{ inputs.dotnet-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: nunit_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + + nunit-compare: + needs: [detect-frameworks, nunit-source, nunit-target] + if: always() && needs.detect-frameworks.outputs.has_nunit == 'true' && needs.nunit-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + baseline_results_artifact: nunit_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: nunit_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.nunit-target.outputs.passed }} + baseline_total: ${{ needs.nunit-target.outputs.total }} + baseline_percentage: ${{ needs.nunit-target.outputs.percentage }} + current_passed: ${{ needs.nunit-source.outputs.passed }} + current_total: ${{ needs.nunit-source.outputs.total }} + current_percentage: ${{ needs.nunit-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.nunit-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.nunit-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.nunit-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.nunit-source.outputs.no_tests_found }} + artifact_name: regression_nunit_${{ github.event.pull_request.number || github.run_id }} + + # ==================== XUNIT (.NET) ==================== + xunit-source: + 
needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_xunit == 'true' + uses: ./.github/workflows/test-cs-xunit.yml + with: + ref: "" + dotnet-version: ${{ inputs.dotnet-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: xunit_source_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + + xunit-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_xunit == 'true' + uses: ./.github/workflows/test-cs-xunit.yml + with: + ref: ${{ inputs.target_branch }} + dotnet-version: ${{ inputs.dotnet-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: xunit_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + + xunit-compare: + needs: [detect-frameworks, xunit-source, xunit-target] + if: always() && needs.detect-frameworks.outputs.has_xunit == 'true' && needs.xunit-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + baseline_results_artifact: xunit_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: xunit_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.xunit-target.outputs.passed }} + baseline_total: ${{ needs.xunit-target.outputs.total }} + baseline_percentage: ${{ needs.xunit-target.outputs.percentage }} + current_passed: ${{ needs.xunit-source.outputs.passed }} + current_total: ${{ needs.xunit-source.outputs.total }} + current_percentage: ${{ needs.xunit-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.xunit-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.xunit-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.xunit-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.xunit-source.outputs.no_tests_found }} + artifact_name: regression_xunit_${{ github.event.pull_request.number || github.run_id }} + + # ==================== PYTHON UNITTEST ==================== + unittest-source: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_unittest == 'true' + uses: ./.github/workflows/test-py-unittest.yml + with: + ref: "" + python-version: ${{ inputs.python-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: unittest_source_${{ github.event.pull_request.number || github.run_id }} + start-directory: ${{ inputs.unittest-start-directory }} + test-pattern: ${{ inputs.unittest-test-pattern }} + + unittest-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_unittest == 'true' + uses: ./.github/workflows/test-py-unittest.yml + with: + ref: ${{ inputs.target_branch }} + python-version: ${{ inputs.python-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: unittest_target_${{ github.event.pull_request.number || github.run_id }} + start-directory: ${{ inputs.unittest-start-directory }} + test-pattern: ${{ inputs.unittest-test-pattern }} + + unittest-compare: + needs: [detect-frameworks, unittest-source, unittest-target] + if: always() && needs.detect-frameworks.outputs.has_unittest == 'true' && needs.unittest-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + 
baseline_results_artifact: unittest_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: unittest_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.unittest-target.outputs.passed }} + baseline_total: ${{ needs.unittest-target.outputs.total }} + baseline_percentage: ${{ needs.unittest-target.outputs.percentage }} + current_passed: ${{ needs.unittest-source.outputs.passed }} + current_total: ${{ needs.unittest-source.outputs.total }} + current_percentage: ${{ needs.unittest-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.unittest-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.unittest-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.unittest-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.unittest-source.outputs.no_tests_found }} + artifact_name: regression_unittest_${{ github.event.pull_request.number || github.run_id }} + + # ==================== STORYBOOK ==================== + storybook-source: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_storybook == 'true' + uses: ./.github/workflows/test-js-storybook.yml + with: + ref: "" + node-version: ${{ inputs.node-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: storybook_source_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + storybook_port: ${{ inputs.storybook-port }} + storybook_start_command: ${{ inputs.storybook-start-command }} + storybook_test_command: ${{ inputs.storybook-test-command }} + + storybook-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_storybook == 'true' + uses: ./.github/workflows/test-js-storybook.yml + with: + ref: ${{ inputs.target_branch }} + node-version: ${{ inputs.node-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: storybook_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + storybook_port: ${{ inputs.storybook-port }} + storybook_start_command: ${{ inputs.storybook-start-command }} + storybook_test_command: ${{ inputs.storybook-test-command }} + + storybook-compare: + needs: [detect-frameworks, storybook-source, storybook-target] + if: always() && needs.detect-frameworks.outputs.has_storybook == 'true' && needs.storybook-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + baseline_results_artifact: storybook_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: storybook_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.storybook-target.outputs.passed }} + baseline_total: ${{ needs.storybook-target.outputs.total }} + baseline_percentage: ${{ needs.storybook-target.outputs.percentage }} + current_passed: ${{ needs.storybook-source.outputs.passed }} + current_total: ${{ needs.storybook-source.outputs.total }} + current_percentage: ${{ needs.storybook-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.storybook-target.outputs.collection_errors }} + 
baseline_no_tests_found: ${{ needs.storybook-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.storybook-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.storybook-source.outputs.no_tests_found }} + artifact_name: regression_storybook_${{ github.event.pull_request.number || github.run_id }} + # ==================== AGGREGATE RESULTS ==================== aggregate-results: - needs: [detect-frameworks, pytest-compare, jest-compare, mocha-compare, cargo-compare, cpp-compare] + needs: [detect-frameworks, pytest-compare, jest-compare, mocha-compare, cargo-compare, cpp-compare, nunit-compare, xunit-compare, unittest-compare, storybook-compare] if: always() runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: @@ -553,13 +871,53 @@ jobs: fi fi + # Check NUnit + if [ "${{ needs.detect-frameworks.outputs.has_nunit }}" == "true" ]; then + NUNIT_REGRESSIONS="${{ needs.nunit-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.nunit-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + NUNIT_REGRESSIONS)) + echo "NUnit regressions: $NUNIT_REGRESSIONS" + fi + fi + + # Check xUnit + if [ "${{ needs.detect-frameworks.outputs.has_xunit }}" == "true" ]; then + XUNIT_REGRESSIONS="${{ needs.xunit-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.xunit-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + XUNIT_REGRESSIONS)) + echo "xUnit regressions: $XUNIT_REGRESSIONS" + fi + fi + + # Check Python unittest + if [ "${{ needs.detect-frameworks.outputs.has_unittest }}" == "true" ]; then + UNITTEST_REGRESSIONS="${{ needs.unittest-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.unittest-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + UNITTEST_REGRESSIONS)) + echo "Python unittest regressions: $UNITTEST_REGRESSIONS" + fi + fi + + # Check Storybook + if [ "${{ needs.detect-frameworks.outputs.has_storybook }}" == "true" ]; then + STORYBOOK_REGRESSIONS="${{ needs.storybook-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.storybook-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + STORYBOOK_REGRESSIONS)) + echo "Storybook regressions: $STORYBOOK_REGRESSIONS" + fi + fi + echo "has_regressions=$HAS_REGRESSIONS" >> "$GITHUB_OUTPUT" echo "regression_count=$TOTAL_REGRESSIONS" >> "$GITHUB_OUTPUT" echo "Total regressions across all frameworks: $TOTAL_REGRESSIONS" # ==================== NOTIFICATIONS ==================== notify: - needs: [detect-frameworks, pytest-source, pytest-target, pytest-compare, jest-source, jest-target, jest-compare, mocha-source, mocha-target, mocha-compare, cargo-source, cargo-target, cargo-compare, cpp-source, cpp-target, cpp-compare, aggregate-results] + needs: [detect-frameworks, pytest-source, pytest-target, pytest-compare, jest-source, jest-target, jest-compare, mocha-source, mocha-target, mocha-compare, cargo-source, cargo-target, cargo-compare, cpp-source, cpp-target, cpp-compare, nunit-source, nunit-target, nunit-compare, xunit-source, xunit-target, xunit-compare, unittest-source, unittest-target, unittest-compare, storybook-source, storybook-target, storybook-compare, aggregate-results] if: always() && needs.aggregate-results.outputs.has_regressions == 'true' runs-on: ${{ fromJSON(inputs.runs_on) }} steps: @@ -631,6 +989,50 
@@ jobs: MSG+="\n" fi + # NUnit results + if [ "${{ needs.detect-frameworks.outputs.has_nunit }}" == "true" ]; then + MSG+="**NUnit (.NET):**\n" + MSG+=" Source: ${{ needs.nunit-source.outputs.passed }}/${{ needs.nunit-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.nunit-target.outputs.passed }}/${{ needs.nunit-target.outputs.total }}\n" + if [ "${{ needs.nunit-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.nunit-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # xUnit results + if [ "${{ needs.detect-frameworks.outputs.has_xunit }}" == "true" ]; then + MSG+="**xUnit (.NET):**\n" + MSG+=" Source: ${{ needs.xunit-source.outputs.passed }}/${{ needs.xunit-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.xunit-target.outputs.passed }}/${{ needs.xunit-target.outputs.total }}\n" + if [ "${{ needs.xunit-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.xunit-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # Python unittest results + if [ "${{ needs.detect-frameworks.outputs.has_unittest }}" == "true" ]; then + MSG+="**Python unittest:**\n" + MSG+=" Source: ${{ needs.unittest-source.outputs.passed }}/${{ needs.unittest-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.unittest-target.outputs.passed }}/${{ needs.unittest-target.outputs.total }}\n" + if [ "${{ needs.unittest-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.unittest-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # Storybook results + if [ "${{ needs.detect-frameworks.outputs.has_storybook }}" == "true" ]; then + MSG+="**Storybook:**\n" + MSG+=" Source: ${{ needs.storybook-source.outputs.passed }}/${{ needs.storybook-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.storybook-target.outputs.passed }}/${{ needs.storybook-target.outputs.total }}\n" + if [ "${{ needs.storybook-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.storybook-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + MSG+="Total Regressions: ${{ needs.aggregate-results.outputs.regression_count }}\n\n" MSG+="[View Run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" diff --git a/.github/workflows/test-cpp-gtest.yml b/.github/workflows/test-cpp-gtest.yml index cd52b39..168587b 100644 --- a/.github/workflows/test-cpp-gtest.yml +++ b/.github/workflows/test-cpp-gtest.yml @@ -256,79 +256,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" 
- if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running tests with $WORKERS parallel jobs..." diff --git a/.github/workflows/test-cs-nunit.yml b/.github/workflows/test-cs-nunit.yml index e6d27cb..496c40a 100644 --- a/.github/workflows/test-cs-nunit.yml +++ b/.github/workflows/test-cs-nunit.yml @@ -63,6 +63,9 @@ on: skipped_count: description: "Number of skipped tests" value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of xfailed tests" + value: ${{ jobs.test.outputs.xfailed_count }} jobs: test: @@ -82,6 +85,7 @@ jobs: failing_count: ${{ steps.extract-results.outputs.failing_count }} error_count: ${{ steps.extract-results.outputs.error_count }} skipped_count: ${{ steps.extract-results.outputs.skipped_count }} + xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} steps: - name: Checkout @@ -157,79 +161,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ 
^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running NUnit tests with $WORKERS parallel workers..." @@ -348,6 +282,7 @@ jobs: f.write(f'failing_count={len(failing_tests)}\n') f.write(f'error_count={len(error_tests)}\n') f.write(f'skipped_count={len(skipped_tests)}\n') + f.write(f'xfailed_count={len(xfailed_tests)}\n') " - name: Upload test artifacts diff --git a/.github/workflows/test-cs-xunit.yml b/.github/workflows/test-cs-xunit.yml index fa0fe1f..a0293f3 100644 --- a/.github/workflows/test-cs-xunit.yml +++ b/.github/workflows/test-cs-xunit.yml @@ -63,6 +63,9 @@ on: skipped_count: description: "Number of skipped tests" value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of xfailed tests" + value: ${{ jobs.test.outputs.xfailed_count }} jobs: test: @@ -82,6 +85,7 @@ jobs: failing_count: ${{ steps.extract-results.outputs.failing_count }} error_count: ${{ steps.extract-results.outputs.error_count }} skipped_count: ${{ steps.extract-results.outputs.skipped_count }} + xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} steps: - name: Checkout @@ -157,79 +161,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ 
-z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running xUnit tests with $WORKERS parallel workers..." @@ -348,6 +282,7 @@ jobs: f.write(f'failing_count={len(failing_tests)}\n') f.write(f'error_count={len(error_tests)}\n') f.write(f'skipped_count={len(skipped_tests)}\n') + f.write(f'xfailed_count={len(xfailed_tests)}\n') " - name: Upload test artifacts diff --git a/.github/workflows/test-js-jest.yml b/.github/workflows/test-js-jest.yml index c25294a..060239b 100644 --- a/.github/workflows/test-js-jest.yml +++ b/.github/workflows/test-js-jest.yml @@ -207,79 +207,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running tests with $WORKERS workers..." 
diff --git a/.github/workflows/test-js-mocha.yml b/.github/workflows/test-js-mocha.yml index e1ce59a..8a06cb7 100644 --- a/.github/workflows/test-js-mocha.yml +++ b/.github/workflows/test-js-mocha.yml @@ -219,79 +219,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running tests with $WORKERS workers..." diff --git a/.github/workflows/test-js-storybook.yml b/.github/workflows/test-js-storybook.yml new file mode 100644 index 0000000..ba53cb6 --- /dev/null +++ b/.github/workflows/test-js-storybook.yml @@ -0,0 +1,280 @@ +name: Reusable Storybook Test Runner + +on: + workflow_call: + inputs: + ref: + description: "Git ref to checkout and test. Leave empty for default checkout." + required: false + type: string + default: "" + node-version: + description: "Node.js version to use for Storybook tests." + required: false + type: string + default: "18.x" + runs_on: + description: "Runner label for the test job." + required: false + type: string + default: '["self-hosted", "multithreaded"]' + artifact_name: + description: "Name for the test results artifact." + required: true + type: string + parallel_workers: + description: "Number of parallel workers. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or a number." 
+ required: false + type: string + default: "" + storybook_port: + description: "Port for Storybook server." + required: false + type: string + default: "6006" + storybook_start_command: + description: "Command to start Storybook server." + required: false + type: string + default: "npm run storybook" + storybook_test_command: + description: "Command to run Storybook tests." + required: false + type: string + default: "npm run storybook-test" + working-directory: + description: "Directory where storybook commands should be executed." + required: false + type: string + default: "." + outputs: + total: + description: "Total number of tests" + value: ${{ jobs.test.outputs.total }} + passed: + description: "Number of passing tests" + value: ${{ jobs.test.outputs.passed }} + percentage: + description: "Pass percentage" + value: ${{ jobs.test.outputs.percentage }} + collection_errors: + description: "Whether collection errors occurred" + value: ${{ jobs.test.outputs.collection_errors }} + no_tests_found: + description: "Whether no tests were found" + value: ${{ jobs.test.outputs.no_tests_found }} + has_errors: + description: "Whether any errors occurred" + value: ${{ jobs.test.outputs.has_errors }} + error_type: + description: "Type of error if any" + value: ${{ jobs.test.outputs.error_type }} + failing_count: + description: "Number of failing tests" + value: ${{ jobs.test.outputs.failing_count }} + error_count: + description: "Number of errored tests" + value: ${{ jobs.test.outputs.error_count }} + skipped_count: + description: "Number of skipped tests" + value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of xfailed tests" + value: ${{ jobs.test.outputs.xfailed_count }} + +jobs: + test: + runs-on: ${{ fromJSON(inputs.runs_on) }} + defaults: + run: + shell: bash + working-directory: ${{ inputs['working-directory'] }} + outputs: + total: ${{ steps.extract-results.outputs.total }} + passed: ${{ steps.extract-results.outputs.passed }} + percentage: ${{ steps.extract-results.outputs.percentage }} + collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }} + no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} + has_errors: ${{ steps.check-collection.outputs.has_errors }} + error_type: ${{ steps.check-collection.outputs.error_type }} + failing_count: ${{ steps.extract-results.outputs.failing_count }} + error_count: ${{ steps.extract-results.outputs.error_count }} + skipped_count: ${{ steps.extract-results.outputs.skipped_count }} + xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} + + steps: + - name: Checkout + uses: actions/checkout@v4.2.2 + with: + submodules: "recursive" + ref: ${{ inputs.ref || github.ref }} + + - name: Use Node.js ${{ inputs.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ inputs.node-version }} + cache: "npm" + + - name: Install dependencies + run: npm ci + + - name: Install Playwright browsers + run: npx playwright install --with-deps + + - name: Check for Storybook configuration + id: check-collection + run: | + HAS_COLLECTION_ERRORS="false" + NO_TESTS_FOUND="false" + ERROR_TYPE="none" + + # Check if storybook is properly configured + if ! 
npm ls @storybook/test-runner >/dev/null 2>&1; then + echo "::warning::@storybook/test-runner not found in dependencies" + fi + + # Try to verify storybook can start + echo "Storybook configuration check passed" + echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> "$GITHUB_OUTPUT" + echo "no_tests_found=$NO_TESTS_FOUND" >> "$GITHUB_OUTPUT" + echo "error_type=$ERROR_TYPE" >> "$GITHUB_OUTPUT" + echo "has_errors=false" >> "$GITHUB_OUTPUT" + + - name: Start Storybook + id: start-storybook + run: | + ${{ inputs.storybook_start_command }} -- --port ${{ inputs.storybook_port }} & + STORYBOOK_PID=$! + echo "storybook_pid=$STORYBOOK_PID" >> "$GITHUB_OUTPUT" + + - name: Wait for Storybook + run: | + echo "Waiting for Storybook to start on port ${{ inputs.storybook_port }}..." + timeout=120 + counter=0 + until $(curl --output /dev/null --silent --head --fail http://localhost:${{ inputs.storybook_port }}); do + if [ $counter -ge $timeout ]; then + echo "::error::Timed out waiting for Storybook to start" + exit 1 + fi + echo "Waiting for Storybook... ($counter seconds so far)" + sleep 5 + counter=$((counter + 5)) + done + echo "Storybook is up and running on port ${{ inputs.storybook_port }}!" + + - name: Run Storybook tests + id: run-tests + continue-on-error: true + run: | + set -euo pipefail + + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') + + echo "Running Storybook tests with $WORKERS workers..." + + WORKER_FLAGS="" + if [ "$WORKERS" != "1" ]; then + WORKER_FLAGS="--maxWorkers=$WORKERS" + fi + + set +e + ${{ inputs.storybook_test_command }} -- --url http://localhost:${{ inputs.storybook_port }} $WORKER_FLAGS --json --outputFile=storybook_results.json 2>&1 | tee test_output.txt + TEST_EXIT=$? + set -e + + echo "storybook_exit_code=$TEST_EXIT" >> "$GITHUB_OUTPUT" + + if [ ! -f storybook_results.json ]; then + echo '{"testResults": [], "numTotalTests": 0, "numPassedTests": 0, "numFailedTests": 0}' > storybook_results.json + fi + + echo "Storybook tests completed (exit code: $TEST_EXIT)." 
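+
+      # The step below normalizes the Jest-style JSON emitted by the Storybook test runner
+      # (numTotalTests / numPassedTests / testResults[].assertionResults[]) into the shared
+      # test_data.json schema consumed by regression-test.yml; the error and xfailed buckets
+      # stay empty because the runner does not report those outcomes.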
+ + - name: Extract test results + id: extract-results + run: | + python3 -c " + import json + import os + + total = passed = failed = skipped = 0 + percentage = 0.0 + passing_tests = [] + failing_tests = [] + error_tests = [] + skipped_tests = [] + xfailed_tests = [] + xpassed_tests = [] + all_tests = [] + + try: + with open('storybook_results.json') as f: + results = json.load(f) + + # Parse Jest-style Storybook results + total = results.get('numTotalTests', 0) + passed = results.get('numPassedTests', 0) + failed = results.get('numFailedTests', 0) + skipped = results.get('numPendingTests', 0) + results.get('numTodoTests', 0) + + for test_result in results.get('testResults', []): + for assertion in test_result.get('assertionResults', []): + title = assertion.get('fullName', assertion.get('title', 'unknown')) + status = assertion.get('status', '') + all_tests.append(title) + + if status == 'passed': + passing_tests.append(title) + elif status == 'failed': + failing_tests.append(title) + elif status in ['pending', 'todo', 'skipped']: + skipped_tests.append(title) + + percentage = (passed / total * 100) if total > 0 else 0 + except FileNotFoundError: + print('Results file not found') + except Exception as e: + print(f'Error: {e}') + + # Save artifact data in standardized format + with open('test_data.json', 'w') as f: + json.dump({ + 'passing_tests': passing_tests, + 'failing_tests': failing_tests, + 'error_tests': error_tests, + 'skipped_tests': skipped_tests, + 'xfailed_tests': xfailed_tests, + 'xpassed_tests': xpassed_tests, + 'all_tests': all_tests, + 'skipped_tests_with_reasons': {}, + 'xfailed_tests_with_reasons': {}, + 'warnings': [] + }, f, indent=2) + + print(f'Results: {passed}/{total} ({percentage:.1f}%)') + + with open(os.environ['GITHUB_OUTPUT'], 'a') as f: + f.write(f'total={total}\n') + f.write(f'passed={passed}\n') + f.write(f'percentage={percentage:.2f}\n') + f.write(f'failing_count={len(failing_tests)}\n') + f.write(f'error_count={len(error_tests)}\n') + f.write(f'skipped_count={len(skipped_tests)}\n') + f.write(f'xfailed_count={len(xfailed_tests)}\n') + " + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.artifact_name }} + path: | + ${{ inputs['working-directory'] }}/test_data.json + ${{ inputs['working-directory'] }}/test_output.txt + ${{ inputs['working-directory'] }}/storybook_results.json + ${{ inputs['working-directory'] }}/*-snapshots/ + retention-days: 3 + if-no-files-found: ignore diff --git a/.github/workflows/test-py-pytest.yml b/.github/workflows/test-py-pytest.yml index 44a80cb..1a0ccb9 100644 --- a/.github/workflows/test-py-pytest.yml +++ b/.github/workflows/test-py-pytest.yml @@ -155,79 +155,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 
]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running tests with $WORKERS workers..." diff --git a/.github/workflows/test-py-unittest.yml b/.github/workflows/test-py-unittest.yml index 2c37a34..3e60fef 100644 --- a/.github/workflows/test-py-unittest.yml +++ b/.github/workflows/test-py-unittest.yml @@ -1,17 +1,27 @@ -name: Reusable Compare unittest Results +name: Reusable Unittest Runner on: workflow_call: inputs: - target_branch_to_compare: - description: "The target branch to compare against (e.g., main, refs/heads/main)." - required: true + ref: + description: "Git ref to checkout and test. Leave empty for default checkout." + required: false type: string + default: "" python-version: description: "Python version to use for testing." required: false type: string default: "3.10" + runs_on: + description: "Runner label for the test job." + required: false + type: string + default: '["self-hosted", "multithreaded"]' + artifact_name: + description: "Name for the test results artifact." + required: true + type: string start-directory: description: "Directory passed to unittest discovery." required: false @@ -32,56 +42,48 @@ on: required: false type: string default: "." - ping_latest_committer: - description: "If true, the latest committer on the PR will be added to the ping list." - required: false - type: boolean - default: false - runs_on: + parallel_workers: + description: "Number of parallel workers. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or a number. Note: unittest has limited parallel support." required: false type: string - default: '["self-hosted", "multithreaded"]' - secrets: - DISCORD_WEBHOOK_URL: - description: "Discord Webhook URL for failure notifications. If not provided, notifications are skipped." - required: false - DISCORD_USER_MAP: - description: 'JSON string mapping GitHub usernames to Discord User IDs (e.g., {"user1":"id1"}). If not provided, users won''t be pinged.' 
- required: false + default: "" outputs: - pr_total: - description: "Total tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.total }} - pr_passed: - description: "Passed tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.passed }} - pr_percentage: - description: "Pass percentage in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.percentage }} - pr_collection_errors: - description: "PR branch has collection errors" - value: ${{ jobs.test-source-branch.outputs.collection_errors }} - pr_no_tests_found: - description: "PR branch has no tests found" - value: ${{ jobs.test-source-branch.outputs.no_tests_found }} - target_total: - description: "Total tests in target branch" - value: ${{ jobs.test-target-branch.outputs.total }} - target_passed: - description: "Passed tests in target branch" - value: ${{ jobs.test-target-branch.outputs.passed }} - target_percentage: - description: "Pass percentage in target branch" - value: ${{ jobs.test-target-branch.outputs.percentage }} - has_regressions: - description: "Boolean indicating if regressions were found" - value: ${{ jobs.compare-results.outputs.has_regressions }} - regression_count: - description: "Number of test regressions found" - value: ${{ jobs.compare-results.outputs.regression_count }} + total: + description: "Total number of tests" + value: ${{ jobs.test.outputs.total }} + passed: + description: "Number of passing tests" + value: ${{ jobs.test.outputs.passed }} + percentage: + description: "Pass percentage" + value: ${{ jobs.test.outputs.percentage }} + collection_errors: + description: "Whether collection errors occurred" + value: ${{ jobs.test.outputs.collection_errors }} + no_tests_found: + description: "Whether no tests were found" + value: ${{ jobs.test.outputs.no_tests_found }} + has_errors: + description: "Whether any errors occurred" + value: ${{ jobs.test.outputs.has_errors }} + error_type: + description: "Type of error if any" + value: ${{ jobs.test.outputs.error_type }} + failing_count: + description: "Number of failing tests" + value: ${{ jobs.test.outputs.failing_count }} + error_count: + description: "Number of errored tests" + value: ${{ jobs.test.outputs.error_count }} + skipped_count: + description: "Number of skipped tests" + value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of xfailed tests" + value: ${{ jobs.test.outputs.xfailed_count }} jobs: - test-source-branch: + test: runs-on: ${{ fromJSON(inputs.runs_on) }} defaults: run: @@ -95,35 +97,35 @@ jobs: no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} has_errors: ${{ steps.check-collection.outputs.has_errors }} error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} failing_count: ${{ steps.extract-results.outputs.failing_count }} + error_count: ${{ steps.extract-results.outputs.error_count }} skipped_count: ${{ steps.extract-results.outputs.skipped_count }} xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} steps: - - name: Checkout PR Branch + - name: Checkout uses: actions/checkout@v4.2.2 with: submodules: "recursive" + ref: ${{ inputs.ref || github.ref }} - name: Set up Python uses: actions/setup-python@v5.3.0 with: python-version: "${{ inputs.python-version }}" - - name: Set up virtual environment and install dependencies + - name: Install dependencies run: | - VENV_PATH="$PWD/.venv" - python -m venv "$VENV_PATH" - source "$VENV_PATH/bin/activate" python -m pip 
install --upgrade pip if [ -f requirements.txt ]; then pip install -r requirements.txt fi - echo "VENV_PATH=$VENV_PATH" >> $GITHUB_ENV + PYPROJECT=$(find . -name "pyproject.toml" -type f | head -n 1) + if [ -n "$PYPROJECT" ]; then + pip install -e "$(dirname "$PYPROJECT")[dev]" 2>/dev/null || pip install -e "$(dirname "$PYPROJECT")" 2>/dev/null || true + fi - - &prepare-unittest-helper - name: Prepare unittest JSON helper + - name: Prepare unittest JSON helper run: | cat <<'PY' > "$RUNNER_TEMP/unittest_to_json.py" import argparse @@ -355,12 +357,10 @@ jobs: id: check-collection run: | echo "Running unittest collection check..." - source "$VENV_PATH/bin/activate" HAS_COLLECTION_ERRORS="false" NO_TESTS_FOUND="false" ERROR_TYPE="none" - ERROR_DETAILS="none" set +e python "$UNITTEST_JSON_HELPER" \ @@ -375,17 +375,15 @@ jobs: if [ "$EXIT_CODE" -ne 0 ]; then HAS_COLLECTION_ERRORS="true" ERROR_TYPE="CollectionError" - ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g') - echo "::error::Unittest discovery failed in the PR branch (${ERROR_DETAILS:0:200}...)" + echo "::error::Unittest discovery failed" else if [ -f unittest_collection.json ]; then - TEST_COUNT=$(python - <<'PY' -import json -with open('unittest_collection.json', 'r', encoding='utf-8') as handle: - payload = json.load(handle) -print(len(payload.get('tests', []))) -PY -) + TEST_COUNT=$(python3 -c " + import json + with open('unittest_collection.json', 'r', encoding='utf-8') as handle: + payload = json.load(handle) + print(len(payload.get('tests', []))) + ") else TEST_COUNT=0 fi @@ -393,1078 +391,145 @@ PY if [ -z "$TEST_COUNT" ] || [ "$TEST_COUNT" = "0" ]; then NO_TESTS_FOUND="true" ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="unittest discovery did not find any tests" - echo "::warning::No unittest tests were found in the PR branch" + echo "::warning::No unittest tests were found" else - echo "Found $TEST_COUNT unittest test(s) in the PR branch" + echo "Found $TEST_COUNT unittest test(s)" fi fi - # Set all the outputs - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - - # For backward compatibility + echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> "$GITHUB_OUTPUT" + echo "no_tests_found=$NO_TESTS_FOUND" >> "$GITHUB_OUTPUT" + echo "error_type=$ERROR_TYPE" >> "$GITHUB_OUTPUT" if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT + echo "has_errors=true" >> "$GITHUB_OUTPUT" else - echo "has_errors=false" >> $GITHUB_OUTPUT + echo "has_errors=false" >> "$GITHUB_OUTPUT" fi - # Clean summary output - if [[ "$HAS_COLLECTION_ERRORS" == "true" ]]; then - echo "❌ Discovery Error: $ERROR_TYPE" - elif [[ "$NO_TESTS_FOUND" == "true" ]]; then - echo "⚠️ No Tests Found" - else - echo "✅ Discovery Success" - fi - - - name: Run tests on PR Branch + - name: Run tests + id: run-tests + continue-on-error: true if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - echo "Running unittest suite on PR branch..." - source "$VENV_PATH/bin/activate" - - set +e - python "$UNITTEST_JSON_HELPER" \ - --start-directory "${{ inputs['start-directory'] }}" \ - --pattern "${{ inputs['test-pattern'] }}" \ - --top-level-directory "${{ inputs['top-level-directory'] }}" \ - --output pr_results.json > test_output.txt 2>&1 - EXIT_CODE=$? 
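One caveat on the TEST_COUNT extraction in the new discovery-check step above: if the python3 -c body keeps the surrounding YAML indentation inside the quoted string, CPython will reject it with an IndentationError. A safer equivalent is the heredoc form that the old code used (and that this patch otherwise removes); a sketch:

    # Sketch: count discovered tests without relying on python3 -c indentation rules.
    TEST_COUNT=$(python3 - <<'PY'
    import json

    # Count the tests recorded by the discovery run of the unittest JSON helper.
    with open('unittest_collection.json', encoding='utf-8') as handle:
        payload = json.load(handle)
    print(len(payload.get('tests', [])))
    PY
    )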
- set -e - - if [ -s pr_results.json ]; then - echo "✅ Test execution completed (exit code: $EXIT_CODE)" - else - echo "❌ Test execution failed (exit code: $EXIT_CODE)" - fi - - - name: Extract test results and create artifacts - id: extract-results - run: | - echo "PR_BRANCH=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_ENV - - python -c " - import json - import sys - import os - - # Default values in case file doesn't exist or is invalid - pr_total = 0 - pr_passed = 0 - pr_percentage = 0 - failing_tests = [] - skipped_tests = [] - xfailed_tests = [] - all_tests = [] - passing_tests = [] - skipped_tests_with_reasons = {} - xfailed_tests_with_reasons = {} - - try: - print('Attempting to open pr_results.json') - with open('pr_results.json') as f: - pr_results = json.load(f) - print(f'JSON loaded successfully, keys: {list(pr_results.keys())}') - - # Check for collection errors by looking at exitcode or error patterns - if pr_results.get('exitcode', 0) > 1: - print('Detected non-zero exitcode, likely a collection error') - if 'collectors' in pr_results and pr_results['collectors']: - print(f'Collection errors found: {pr_results["collectors"]}') - pr_total = 0 # Explicitly set to 0 - no tests run when collection fails - pr_passed = 0 - elif 'summary' in pr_results and isinstance(pr_results['summary'], dict): - # Normal case - extract data from summary - summary = pr_results['summary'] - pr_total = summary.get('total', 0) - pr_passed = summary.get('passed', 0) - print(f'Results extracted from summary - Total: {pr_total}, Passed: {pr_passed}') - - # Extract all tests by outcome and collect all test nodeids with reasons - if 'tests' in pr_results: - print('Extracting failing, skipped, xfailed, and all tests with reasons') - for test in pr_results['tests']: - outcome = test.get('outcome') - nodeid = test.get('nodeid', '') - if nodeid: - all_tests.append(nodeid) # Track all tests regardless of outcome - if outcome == 'passed': - passing_tests.append(nodeid) - elif outcome in ['failed', 'error']: - failing_tests.append(nodeid) - elif outcome == 'skipped': - skipped_tests.append(nodeid) - # Extract skip reason - skip_reason = 'No reason provided' - if 'longrepr' in test and test['longrepr']: - # longrepr can be a string or list, handle both - longrepr = test['longrepr'] - if isinstance(longrepr, list) and longrepr: - skip_reason = str(longrepr[0]) if longrepr[0] else 'No reason provided' - elif isinstance(longrepr, str): - skip_reason = longrepr - elif 'call' in test and test['call'] and 'longrepr' in test['call']: - skip_reason = str(test['call']['longrepr']) - skipped_tests_with_reasons[nodeid] = skip_reason.strip() - elif outcome == 'xfailed': - xfailed_tests.append(nodeid) - # Extract xfail reason - xfail_reason = 'No reason provided' - if 'longrepr' in test and test['longrepr']: - longrepr = test['longrepr'] - if isinstance(longrepr, list) and longrepr: - xfail_reason = str(longrepr[0]) if longrepr[0] else 'No reason provided' - elif isinstance(longrepr, str): - xfail_reason = longrepr - elif 'call' in test and test['call'] and 'longrepr' in test['call']: - xfail_reason = str(test['call']['longrepr']) - xfailed_tests_with_reasons[nodeid] = xfail_reason.strip() - - print(f'Found {len(passing_tests)} passing tests') - print(f'Found {len(failing_tests)} failing tests') - print(f'Found {len(skipped_tests)} skipped tests') - print(f'Found {len(xfailed_tests)} xfailed tests') - print(f'Found {len(all_tests)} total discovered tests') - else: - print('No valid summary structure found') - - # Calculate 
percentage safely - pr_percentage = (pr_passed / pr_total * 100) if pr_total > 0 else 0 - print(f'Pass percentage calculated: {pr_percentage:.2f}%') - - except FileNotFoundError as e: - print(f'File not found error: {e}') - except KeyError as e: - print(f'Missing key in results file: {e}') - if 'pr_results' in locals(): - print(f'Available keys: {list(pr_results.keys())}') - if 'summary' in pr_results: - print(f'Summary structure: {pr_results["summary"]}') - except Exception as e: - print(f'Error processing results: {e}') - import traceback - print(f'Full exception: {traceback.format_exc()}') - - print(f'Total tests: {pr_total}') - print(f'Passed tests: {pr_passed}') - print(f'Pass percentage: {pr_percentage:.2f}%') - print(f'Failing tests: {len(failing_tests)}') - print(f'Skipped tests: {len(skipped_tests)}') - print(f'Xfailed tests: {len(xfailed_tests)}') - print(f'All discovered tests: {len(all_tests)}') - - # Extract warnings from test output - warnings_list = [] - try: - with open('test_output.txt', 'r') as f: - content = f.read() - # Extract warnings section - if '============================== warnings summary ===============================' in content: - warnings_section = content.split('============================== warnings summary ===============================')[1] - if '-- Docs:' in warnings_section: - warnings_section = warnings_section.split('-- Docs:')[0] - - # Parse warnings - format is file path followed by indented warning details - lines = warnings_section.split('\\n') - current_warning_group = [] - - for line in lines: - line = line.rstrip() - if not line or line.startswith('='): - continue - - # Check if this is a file path (starts at column 0, ends with .py: or contains warning count) - if not line.startswith(' ') and ('.py:' in line or 'warnings' in line): - # Save previous warning group if exists - if current_warning_group: - warnings_list.append('\\n'.join(current_warning_group)) - # Start new warning group - current_warning_group = [line] - elif line.startswith(' ') and current_warning_group: - # This is part of the current warning (indented line) - current_warning_group.append(line) - - # Don't forget the last warning group - if current_warning_group: - warnings_list.append('\\n'.join(current_warning_group)) - - print(f'Extracted {len(warnings_list)} warning groups from test output') - except Exception as e: - print(f'Could not extract warnings: {e}') - - # Save test lists to artifact files instead of job outputs - test_data = { - 'passing_tests': passing_tests, - 'failing_tests': failing_tests, - 'skipped_tests': skipped_tests, - 'xfailed_tests': xfailed_tests, - 'all_tests': all_tests, - 'skipped_tests_with_reasons': skipped_tests_with_reasons, - 'xfailed_tests_with_reasons': xfailed_tests_with_reasons, - 'warnings': warnings_list - } - - with open('pr_test_data.json', 'w') as f: - json.dump(test_data, f, indent=2) - - print('Test data saved to pr_test_data.json for artifact') - print('Results extraction completed') - - # Set scalar outputs only (no large arrays) - with open(os.environ['GITHUB_OUTPUT'], 'a') as f: - f.write(f'total={pr_total}\\n') - f.write(f'passed={pr_passed}\\n') - f.write(f'percentage={pr_percentage:.2f}\\n') - f.write(f'failing_count={len(failing_tests)}\\n') - f.write(f'skipped_count={len(skipped_tests)}\\n') - f.write(f'xfailed_count={len(xfailed_tests)}\\n') - " - - echo "✅ Test results: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} passed (${{ steps.extract-results.outputs.percentage }}%)" - 
- - name: Upload PR branch artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: | - pr_test_data.json - test_output.txt - pr_results.json - collection_output.txt - unittest_collection.json - retention-days: 3 - if-no-files-found: ignore - - test-target-branch: - runs-on: ${{ fromJSON(inputs.runs_on) }} - defaults: - run: - shell: bash - working-directory: ${{ inputs['working-directory'] }} - outputs: - total: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.total || steps.extract-results.outputs.total }} - passed: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passed || steps.extract-results.outputs.passed }} - percentage: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.percentage || steps.extract-results.outputs.percentage }} - collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }} - no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} - has_errors: ${{ steps.check-collection.outputs.has_errors }} - error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} - passing_count: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_count || steps.extract-results.outputs.passing_count }} - - steps: - - name: Checkout target branch - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - ref: ${{ inputs.target_branch_to_compare }} - - - name: Set up Python - uses: actions/setup-python@v5.3.0 - with: - python-version: "${{ inputs.python-version }}" - - - name: Set up virtual environment and install dependencies - run: | - VENV_PATH="$PWD/.venv" - python -m venv "$VENV_PATH" - source "$VENV_PATH/bin/activate" - python -m pip install --upgrade pip - if [ -f requirements.txt ]; then - pip install -r requirements.txt - fi - echo "VENV_PATH=$VENV_PATH" >> $GITHUB_ENV - - - *prepare-unittest-helper - - - name: Check for test collection errors - id: check-collection - run: | - # Create verbose debug file for artifact - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_collection.log) 2>&1 + set -euo pipefail - echo "Running unittest collection check on target branch..." - source "$VENV_PATH/bin/activate" - - HAS_COLLECTION_ERRORS="false" - NO_TESTS_FOUND="false" - ERROR_TYPE="none" - ERROR_DETAILS="none" + echo "Running unittest suite..." set +e python "$UNITTEST_JSON_HELPER" \ - --collect-only \ --start-directory "${{ inputs['start-directory'] }}" \ --pattern "${{ inputs['test-pattern'] }}" \ --top-level-directory "${{ inputs['top-level-directory'] }}" \ - --output unittest_collection.json > collection_output.txt 2>&1 + --output results.json 2>&1 | tee test_output.txt EXIT_CODE=$? 
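A note on the exit-code capture directly above: the new step opens with "set -euo pipefail", and the later "set +e" only drops errexit, so pipefail stays active and EXIT_CODE=$? after the tee pipeline still reflects a failing helper rather than tee's status. If pipefail were ever removed, PIPESTATUS would be needed instead; a sketch (discovery flags elided):

    # Sketch: capture the helper's status explicitly when pipefail is not in effect.
    python "$UNITTEST_JSON_HELPER" --output results.json 2>&1 | tee test_output.txt
    EXIT_CODE="${PIPESTATUS[0]}"   # status of the python helper, not of tee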
set -e - exec 1>&3 2>&4 + echo "unittest_exit_code=$EXIT_CODE" >> "$GITHUB_OUTPUT" - if [ "$EXIT_CODE" -ne 0 ]; then - HAS_COLLECTION_ERRORS="true" - ERROR_TYPE="CollectionError" - ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g') - echo "::warning::Unittest discovery failed on the target branch (${ERROR_DETAILS:0:200}...)" - else - if [ -f unittest_collection.json ]; then - TEST_COUNT=$(python - <<'PY' -import json -with open('unittest_collection.json', 'r', encoding='utf-8') as handle: - payload = json.load(handle) -print(len(payload.get('tests', []))) -PY -) - else - TEST_COUNT=0 - fi - - if [ -z "$TEST_COUNT" ] || [ "$TEST_COUNT" = "0" ]; then - NO_TESTS_FOUND="true" - ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="unittest discovery did not find any tests on the target branch" - echo "::warning::No unittest tests were found in the target branch" - else - echo "Found $TEST_COUNT unittest test(s) in the target branch" - fi - fi - - # Set all the outputs - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - - # For backward compatibility - if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - # Save full collection output to debug file for artifact - echo "=== FULL COLLECTION OUTPUT ===" >> debug_target_collection.log - cat collection_output.txt >> debug_target_collection.log - - - name: Run tests on target branch - if: steps.check-collection.outputs.has_collection_errors != 'true' - run: | - echo "Running unittest suite on target branch..." - source "$VENV_PATH/bin/activate" - - set +e - python "$UNITTEST_JSON_HELPER" \ - --start-directory "${{ inputs['start-directory'] }}" \ - --pattern "${{ inputs['test-pattern'] }}" \ - --top-level-directory "${{ inputs['top-level-directory'] }}" \ - --output target_results.json > target_test_output.txt 2>&1 - EXIT_CODE=$? 
- set -e - - if [ -s target_results.json ]; then + if [ -s results.json ]; then echo "✅ Test execution completed (exit code: $EXIT_CODE)" else echo "❌ Test execution failed (exit code: $EXIT_CODE)" + echo '{"exitcode": 1, "summary": {"total": 0, "passed": 0}, "tests": []}' > results.json fi - - name: Extract test results and create artifacts + - name: Extract test results id: extract-results - # Only run if there were no collection errors - if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - echo "Processing test results for target branch: ${{ inputs.target_branch_to_compare }}" - - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_extract_results.log) 2>&1 - - python -c " + python3 -c " import json - import sys import os - print('Starting test results extraction script for target branch') - - # Default values in case file doesn't exist or is invalid - target_total = 0 - target_passed = 0 - target_percentage = 0 + total = passed = 0 + percentage = 0.0 passing_tests = [] failing_tests = [] + error_tests = [] skipped_tests = [] xfailed_tests = [] + xpassed_tests = [] all_tests = [] - - try: - print('Attempting to open target_results.json') - with open('target_results.json') as f: - target_results = json.load(f) - print(f'JSON loaded successfully, keys: {list(target_results.keys())}') - - # Check for collection errors by looking at exitcode or error patterns - if target_results.get('exitcode', 0) > 1: - print('Detected non-zero exitcode, likely a collection error') - if 'collectors' in target_results and target_results['collectors']: - print(f'Collection errors found: {target_results["collectors"]}') - target_total = 0 # Explicitly set to 0 - no tests run when collection fails - target_passed = 0 - elif 'summary' in target_results and isinstance(target_results['summary'], dict): - # Normal case - extract data from summary - summary = target_results['summary'] - target_total = summary.get('total', 0) - target_passed = summary.get('passed', 0) - print(f'Results extracted from summary - Total: {target_total}, Passed: {target_passed}') - - # Extract all test outcomes - if 'tests' in target_results: - print('Extracting all test outcomes from target') - for test in target_results['tests']: - outcome = test.get('outcome') - nodeid = test.get('nodeid', '') - if nodeid: - all_tests.append(nodeid) # Track all tests regardless of outcome - if outcome == 'passed': - passing_tests.append(nodeid) - elif outcome in ['failed', 'error']: - failing_tests.append(nodeid) - elif outcome == 'skipped': - skipped_tests.append(nodeid) - elif outcome == 'xfailed': - xfailed_tests.append(nodeid) - - print(f'Found {len(passing_tests)} passing tests') - print(f'Found {len(failing_tests)} failing tests') - print(f'Found {len(skipped_tests)} skipped tests') - print(f'Found {len(xfailed_tests)} xfailed tests') - print(f'Found {len(all_tests)} total discovered tests') - else: - print('No valid summary structure found') - - # Calculate percentage safely - target_percentage = (target_passed / target_total * 100) if target_total > 0 else 0 - print(f'Pass percentage calculated: {target_percentage:.2f}%') - - except FileNotFoundError as e: - print(f'File not found error: {e}') - except KeyError as e: - print(f'Missing key in results file: {e}') - if 'target_results' in locals(): - print(f'Available keys: {list(target_results.keys())}') - if 'summary' in target_results: - print(f'Summary structure: {target_results["summary"]}') - except Exception as e: - print(f'Error 
processing results: {e}') - import traceback - print(f'Full exception: {traceback.format_exc()}') - - print(f'Total tests: {target_total}') - print(f'Passed tests: {target_passed}') - print(f'Pass percentage: {target_percentage:.2f}%') - print(f'Passing tests: {len(passing_tests)}') - print(f'All discovered tests: {len(all_tests)}') - - # Extract warnings from test output + skipped_with_reasons = {} + xfailed_with_reasons = {} warnings_list = [] + try: - with open('target_test_output.txt', 'r') as f: - content = f.read() - # Extract warnings section - if '============================== warnings summary ===============================' in content: - warnings_section = content.split('============================== warnings summary ===============================')[1] - if '-- Docs:' in warnings_section: - warnings_section = warnings_section.split('-- Docs:')[0] - - # Parse warnings - format is file path followed by indented warning details - lines = warnings_section.split('\\n') - current_warning_group = [] - - for line in lines: - line = line.rstrip() - if not line or line.startswith('='): - continue - - # Check if this is a file path (starts at column 0, ends with .py: or contains warning count) - if not line.startswith(' ') and ('.py:' in line or 'warnings' in line): - # Save previous warning group if exists - if current_warning_group: - warnings_list.append('\\n'.join(current_warning_group)) - # Start new warning group - current_warning_group = [line] - elif line.startswith(' ') and current_warning_group: - # This is part of the current warning (indented line) - current_warning_group.append(line) - - # Don't forget the last warning group - if current_warning_group: - warnings_list.append('\\n'.join(current_warning_group)) - - print(f'Extracted {len(warnings_list)} warning groups from target test output') + with open('results.json') as f: + results = json.load(f) + + if 'summary' in results: + summary = results['summary'] + total = summary.get('total', 0) + passed = summary.get('passed', 0) + + for test in results.get('tests', []): + outcome = test.get('outcome') + nodeid = test.get('nodeid', '') + if not nodeid: + continue + all_tests.append(nodeid) + if outcome == 'passed': + passing_tests.append(nodeid) + elif outcome == 'failed': + failing_tests.append(nodeid) + elif outcome == 'error': + error_tests.append(nodeid) + elif outcome == 'skipped': + skipped_tests.append(nodeid) + reason = test.get('longrepr', 'No reason') + if isinstance(reason, list): + reason = reason[0] if reason else 'No reason' + skipped_with_reasons[nodeid] = str(reason).strip() + elif outcome == 'xfailed': + xfailed_tests.append(nodeid) + reason = test.get('longrepr', 'No reason') + if isinstance(reason, list): + reason = reason[0] if reason else 'No reason' + xfailed_with_reasons[nodeid] = str(reason).strip() + elif outcome == 'xpassed': + xpassed_tests.append(nodeid) + + percentage = (passed / total * 100) if total > 0 else 0 + except FileNotFoundError: + print('Results file not found') except Exception as e: - print(f'Could not extract warnings: {e}') - - # Save test lists to artifact files instead of job outputs - test_data = { - 'passing_tests': passing_tests, - 'failing_tests': failing_tests, - 'skipped_tests': skipped_tests, - 'xfailed_tests': xfailed_tests, - 'all_tests': all_tests, - 'warnings': warnings_list - } + print(f'Error: {e}') + + # Save artifact data + with open('test_data.json', 'w') as f: + json.dump({ + 'passing_tests': passing_tests, + 'failing_tests': failing_tests, + 'error_tests': 
error_tests, + 'skipped_tests': skipped_tests, + 'xfailed_tests': xfailed_tests, + 'xpassed_tests': xpassed_tests, + 'all_tests': all_tests, + 'skipped_tests_with_reasons': skipped_with_reasons, + 'xfailed_tests_with_reasons': xfailed_with_reasons, + 'warnings': warnings_list + }, f, indent=2) + + print(f'Results: {passed}/{total} ({percentage:.1f}%)') - with open('target_test_data.json', 'w') as f: - json.dump(test_data, f, indent=2) - - print('Test data saved to target_test_data.json for artifact') - print('Results extraction completed') - - # Set scalar outputs only (no large arrays) with open(os.environ['GITHUB_OUTPUT'], 'a') as f: - f.write(f'total={target_total}\\n') - f.write(f'passed={target_passed}\\n') - f.write(f'percentage={target_percentage:.2f}\\n') - f.write(f'passing_count={len(passing_tests)}\\n') + f.write(f'total={total}\n') + f.write(f'passed={passed}\n') + f.write(f'percentage={percentage:.2f}\n') + f.write(f'failing_count={len(failing_tests)}\n') + f.write(f'error_count={len(error_tests)}\n') + f.write(f'skipped_count={len(skipped_tests)}\n') + f.write(f'xfailed_count={len(xfailed_tests)}\n') " - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Target branch test results processed: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} tests passed (${{ steps.extract-results.outputs.percentage }}%)" - - - name: Upload target branch artifacts + - name: Upload test artifacts if: always() uses: actions/upload-artifact@v4 with: - name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} + name: ${{ inputs.artifact_name }} path: | - target_test_data.json - target_test_output.txt - target_results.json - collection_output.txt - unittest_collection.json - debug_target_collection.log - debug_target_extract_results.log + ${{ inputs['working-directory'] }}/test_data.json + ${{ inputs['working-directory'] }}/test_output.txt + ${{ inputs['working-directory'] }}/results.json retention-days: 3 if-no-files-found: ignore - - # Add a step to set default outputs when collection errors are detected - - name: Set collection error outputs - id: set-error-outputs - if: steps.check-collection.outputs.has_collection_errors == 'true' - run: | - echo "::warning::Setting default outputs for target branch due to collection errors" - echo "total=0" >> $GITHUB_OUTPUT - echo "passed=0" >> $GITHUB_OUTPUT - echo "percentage=0.00" >> $GITHUB_OUTPUT - echo "passing_count=0" >> $GITHUB_OUTPUT - - compare-results: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/regression-test.yml - with: - runs_on: ${{ inputs.runs_on }} - baseline_label: ${{ inputs.target_branch_to_compare }} - baseline_results_artifact: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - baseline_results_filename: target_test_data.json - current_label: ${{ github.head_ref || github.ref_name || 'source branch' }} - current_results_artifact: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - current_results_filename: pr_test_data.json - baseline_passed: ${{ needs.test-target-branch.outputs.passed }} - baseline_total: ${{ needs.test-target-branch.outputs.total }} - baseline_percentage: ${{ needs.test-target-branch.outputs.percentage }} - current_passed: ${{ needs.test-source-branch.outputs.passed }} - current_total: ${{ needs.test-source-branch.outputs.total }} - current_percentage: ${{ needs.test-source-branch.outputs.percentage }} - baseline_collection_errors: ${{ 
needs.test-target-branch.outputs.collection_errors }} - baseline_no_tests_found: ${{ needs.test-target-branch.outputs.no_tests_found }} - current_collection_errors: ${{ needs.test-source-branch.outputs.collection_errors }} - current_no_tests_found: ${{ needs.test-source-branch.outputs.no_tests_found }} - artifact_name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - - perform-regression-analysis: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/meta-regression-analysis.yml - with: - item_type_singular: "test" - item_type_plural: "tests" - pr_number: ${{ github.event.pull_request.number }} - run_id: ${{ github.run_id }} - target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - - # Conditionally run notification job only if needed - prepare-notification: - name: Prepare Notification Data - needs: - [ - test-source-branch, - test-target-branch, - compare-results, - perform-regression-analysis, - ] - # Notify on collection errors, no tests found, compare result failure, or if regressions are detected - if: | - always() && - ( - needs.test-source-branch.outputs.collection_errors == 'true' || - needs.test-source-branch.outputs.no_tests_found == 'true' || - needs.compare-results.result == 'failure' || - needs.perform-regression-analysis.outputs.has_regressions == 'true' - ) - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - message_body: ${{ steps.construct_notification.outputs.message_body_out }} - ping_user_ids: ${{ steps.construct_notification.outputs.ping_user_ids_out }} - artifact_path: ${{ steps.construct_notification.outputs.artifact_path_out }} - should_notify: "true" - webhook_available_for_alert: ${{ steps.check_webhook_availability.outputs.webhook_available }} - - steps: - - name: Check for Discord Webhook URL - id: check_webhook_availability - run: | - if [ -z "${{ secrets.DISCORD_WEBHOOK_URL }}" ]; then - echo "::notice::DISCORD_WEBHOOK_URL secret is not set. Discord notifications will likely be skipped by the alert workflow if it relies on this secret." - echo "webhook_available=false" >> $GITHUB_OUTPUT - else - echo "webhook_available=true" >> $GITHUB_OUTPUT - fi - - name: Download regression details (if any) - id: download_regressions - if: always() - uses: actions/download-artifact@v4 - with: - name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - path: . # Download to current directory - continue-on-error: true - - - name: Check downloaded regression file - if: always() - run: | - echo "Checking for regression details file..." 
- if [ -f "regression_details.txt" ]; then - echo "✅ Regression details file found" - echo "File size: $(wc -c < regression_details.txt) bytes" - echo "First few lines:" - head -5 regression_details.txt - else - echo "❌ Regression details file not found" - fi - - if [ -f "comprehensive_regression_report.txt" ]; then - echo "✅ Comprehensive regression report found" - echo "File size: $(wc -c < comprehensive_regression_report.txt) bytes" - else - echo "❌ Comprehensive regression report not found" - fi - - - name: Construct Discord Notification - id: construct_notification - env: - LINT_RESULT: ${{ needs.lint.result }} - SOURCE_TEST_RESULT: ${{ needs.test-source-branch.result }} - TARGET_TEST_RESULT: ${{ needs.test-target-branch.result }} - COMPARE_RESULT: ${{ needs.compare-results.result }} - PR_COLLECTION_ERRORS: ${{ needs.test-source-branch.outputs.collection_errors }} - PR_NO_TESTS_FOUND: ${{ needs.test-source-branch.outputs.no_tests_found }} - PR_ERROR_TYPE: ${{ needs.test-source-branch.outputs.error_type }} - PR_ERROR_DETAILS_TRUNCATED: ${{ needs.test-source-branch.outputs.error_details }} - HAS_REGRESSIONS: ${{ needs.perform-regression-analysis.outputs.has_regressions }} - REGRESSION_COUNT: ${{ needs.perform-regression-analysis.outputs.regression_count }} - PR_TOTAL_TESTS: ${{ needs.test-source-branch.outputs.total }} - PR_PASSED_TESTS: ${{ needs.test-source-branch.outputs.passed }} - PR_PERCENTAGE: ${{ needs.test-source-branch.outputs.percentage }} - TARGET_TOTAL_TESTS: ${{ needs.test-target-branch.outputs.total }} - TARGET_PASSED_TESTS: ${{ needs.test-target-branch.outputs.passed }} - TARGET_PERCENTAGE: ${{ needs.test-target-branch.outputs.percentage }} - PR_NUMBER: ${{ github.event.pull_request.number }} - PR_TITLE: ${{ github.event.pull_request.title }} - PR_URL: ${{ github.event.pull_request.html_url }} - TARGET_BRANCH_NAME: ${{ inputs.target_branch_to_compare }} - PR_BRANCH_NAME: ${{ github.head_ref }} - REPO_URL: ${{ github.server_url }}/${{ github.repository }} - ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - GH_ASSIGNEES_JSON: ${{ toJson(github.event.pull_request.assignees) }} - USER_MAP_JSON: ${{ secrets.DISCORD_USER_MAP || '{}' }} - REGRESSION_FILE_PATH: "regression_details.txt" - DOWNLOAD_REGRESSIONS_OUTCOME: ${{ steps.download_regressions.outcome }} - INPUT_PING_LATEST_COMMITTER: ${{ inputs.ping_latest_committer }} - run: | - # Create debug file for detailed notification construction - exec 3>&1 4>&2 - exec 1> >(tee -a debug_notification_construction.log) 2>&1 - - MESSAGE_LINES=() # Use an array to build message lines - PING_KEYS_OUTPUT="" # Will be comma-separated GitHub logins - ARTIFACT_PATH_OUTPUT="" - - echo "Raw GH_ASSIGNEES_JSON value: [$GH_ASSIGNEES_JSON]" - echo "Raw USER_MAP_JSON value: [$USER_MAP_JSON]" - - # 1. 
Determine Pings - Collect GitHub Logins to pass to alert-discord.yml - # Initialize PING_KEYS_OUTPUT - PING_KEYS_OUTPUT="" - - # Add assignees to PING_KEYS_OUTPUT - if [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && command -v jq &> /dev/null; then - ASSIGNEE_LOGINS_ARRAY=($(echo "$GH_ASSIGNEES_JSON" | jq -r '.[].login // empty')) - echo "GH_ASSIGNEES_JSON received: $GH_ASSIGNEES_JSON" - echo "Extracted ASSIGNEE_LOGINS_ARRAY: (${ASSIGNEE_LOGINS_ARRAY[*]})" - echo "Count of assignees extracted: ${#ASSIGNEE_LOGINS_ARRAY[@]}" - - MAPPED_ASSIGNEE_COUNT=0 - TEMP_PING_KEYS=() - - for assignee_login in "${ASSIGNEE_LOGINS_ARRAY[@]}"; do - if [ -z "$assignee_login" ]; then - echo "Skipping empty assignee login." - continue - fi - echo "Processing assignee for ping: '$assignee_login'" - # Check if this assignee_login exists as a key in USER_MAP_JSON - if echo "$USER_MAP_JSON" | jq -e --arg K "$assignee_login" '.[$K]' > /dev/null; then - echo "Assignee '$assignee_login' FOUND in USER_MAP_JSON." - TEMP_PING_KEYS+=("$assignee_login") - MAPPED_ASSIGNEE_COUNT=$((MAPPED_ASSIGNEE_COUNT + 1)) - else - echo "Assignee '$assignee_login' NOT FOUND in USER_MAP_JSON." - fi - done - - echo "Total assignees found in USER_MAP_JSON and added to pings: $MAPPED_ASSIGNEE_COUNT" - - if [ ${#TEMP_PING_KEYS[@]} -gt 0 ]; then - PING_KEYS_OUTPUT=$(IFS=,; echo "${TEMP_PING_KEYS[*]}") - echo "Initial PING_KEYS_OUTPUT from assignees: [$PING_KEYS_OUTPUT]" - else - echo "No assignees found or GH_ASSIGNEES_JSON was empty, or no assignees were found in USER_MAP_JSON." - fi - elif [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && ! command -v jq &> /dev/null; then - echo "::warning::jq is not available. Cannot determine GitHub users (assignees) for pings." - else - echo "No user map JSON or jq not found. PING_KEYS_OUTPUT (from assignees) will be empty." - fi - - # Add latest committer if INPUT_PING_LATEST_COMMITTER is true - if [[ "$INPUT_PING_LATEST_COMMITTER" == "true" ]]; then - echo "INPUT_PING_LATEST_COMMITTER is true. Attempting to fetch latest committer for PR #${PR_NUMBER}." - if command -v gh &> /dev/null && [ -n "$PR_NUMBER" ]; then - LATEST_COMMITTER_LOGIN_RAW=$(gh pr view "$PR_NUMBER" --json commits --jq '.commits[-1].author.login' 2>/dev/null || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN_RAW" ] && [ "$LATEST_COMMITTER_LOGIN_RAW" != "null" ]; then - # Apply bot filter (e.g., names ending in [bot] or -bot) - LATEST_COMMITTER_LOGIN=$(echo "$LATEST_COMMITTER_LOGIN_RAW" | grep -v -E -i '(\[bot\]$|-bot$)' || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN" ]; then - echo "Latest committer identified: $LATEST_COMMITTER_LOGIN" - - # Check if this committer is already in PING_KEYS_OUTPUT - ALREADY_IN_LIST=0 - if [ -n "$PING_KEYS_OUTPUT" ]; then # Only check if PING_KEYS_OUTPUT is not empty - IFS=',' read -ra PING_ARRAY <<< "$PING_KEYS_OUTPUT" - for key in "${PING_ARRAY[@]}"; do - if [[ "$key" == "$LATEST_COMMITTER_LOGIN" ]]; then - ALREADY_IN_LIST=1 - break - fi - done - fi - - if [[ "$ALREADY_IN_LIST" -eq 0 ]]; then - if [ -z "$PING_KEYS_OUTPUT" ]; then - PING_KEYS_OUTPUT="$LATEST_COMMITTER_LOGIN" - else - PING_KEYS_OUTPUT="$PING_KEYS_OUTPUT,$LATEST_COMMITTER_LOGIN" - fi - echo "Added latest committer '$LATEST_COMMITTER_LOGIN' to PING_KEYS_OUTPUT. New list: [$PING_KEYS_OUTPUT]" - else - echo "Latest committer '$LATEST_COMMITTER_LOGIN' is already in PING_KEYS_OUTPUT (likely an assignee)." 
- fi - else - echo "Latest committer login '$LATEST_COMMITTER_LOGIN_RAW' was filtered out (likely a bot or pattern match) or empty after filter." - fi - else - echo "No latest committer login found for PR #$PR_NUMBER from gh command, or login was null." - fi - else - if ! command -v gh &> /dev/null; then - echo "::warning::gh command not available. Cannot fetch latest committer." - fi - if [ -z "$PR_NUMBER" ]; then - echo "::warning::PR_NUMBER is not set (event might not be a pull_request). Cannot fetch latest committer." - fi - fi - fi - - # Restore stdout/stderr for GitHub Actions to show final summary - exec 1>&3 2>&4 - - # Make this a standard echo for better visibility of the final list - echo "Final Ping Keys Output (GitHub Logins from test-py-unittest.yml): [$PING_KEYS_OUTPUT]" - echo "ping_user_ids_out=$PING_KEYS_OUTPUT" >> $GITHUB_OUTPUT - - # Store branch names in variables with proper quoting - PR_BRANCH="${PR_BRANCH_NAME:-unknown}" - TARGET_BRANCH="${TARGET_BRANCH_NAME:-unknown}" - - # 2. Construct Message Body - MESSAGE_LINES+=("**Pytest Comparison & Regression Analysis for PR [#${PR_NUMBER}: ${PR_TITLE}](${PR_URL})**") - MESSAGE_LINES+=("Branch: [\`${PR_BRANCH}\`](${REPO_URL}/tree/${PR_BRANCH}) against [\`${TARGET_BRANCH}\`](${REPO_URL}/tree/${TARGET_BRANCH})") - MESSAGE_LINES+=("---") - - # Job Status Summary - MESSAGE_LINES+=("**Job Status:**") - LINT_STATUS="Success" - if [[ "$LINT_RESULT" == "failure" ]]; then LINT_STATUS="Failed"; elif [[ "$LINT_RESULT" == "skipped" ]]; then LINT_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Linting: $LINT_STATUS") - - SOURCE_TEST_STATUS="Success" - if [[ "$SOURCE_TEST_RESULT" == "failure" ]]; then SOURCE_TEST_STATUS="Failed"; elif [[ "$SOURCE_TEST_RESULT" == "skipped" ]]; then SOURCE_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- PR Branch Tests (\`${PR_BRANCH}\`): $SOURCE_TEST_STATUS") - - TARGET_TEST_STATUS="Success" - if [[ "$TARGET_TEST_RESULT" == "failure" ]]; then TARGET_TEST_STATUS="Failed"; elif [[ "$TARGET_TEST_RESULT" == "skipped" ]]; then TARGET_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Target Branch Tests (\`${TARGET_BRANCH}\`): $TARGET_TEST_STATUS") - - COMPARE_STATUS="Success" - if [[ "$COMPARE_RESULT" == "failure" ]]; then COMPARE_STATUS="Failed"; elif [[ "$COMPARE_RESULT" == "skipped" ]]; then COMPARE_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Comparison & Regression: $COMPARE_STATUS") - MESSAGE_LINES+=("---") - - # Test Discovery Issues in PR Branch - if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: ERROR: Test Discovery Failed in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Type: \`${PR_ERROR_TYPE}\`") - MESSAGE_LINES+=(" - This usually indicates import errors or syntax issues preventing tests from being collected.") - MESSAGE_LINES+=(" - See attached file for detailed error information.") - elif [[ "$PR_NO_TESTS_FOUND" == "true" ]]; then - MESSAGE_LINES+=("**:warning: WARNING: No Tests Found in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Pytest did not discover any test files matching its patterns.") - MESSAGE_LINES+=(" - Ensure your test files are correctly named (e.g., \`test_*.py\` or \`*_test.py\`) and located.") - fi - - # Regression Analysis Summary - if [[ "$HAS_REGRESSIONS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: REGRESSIONS DETECTED**") - - # Check if we have comprehensive regression file with categories - if [ -f "comprehensive_regression_report.txt" ]; then - # Extract counts from comprehensive report - PASS_FAIL_COUNT=$(grep -o 
"PASS-TO-FAIL REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - PASS_SKIP_COUNT=$(grep -o "PASS-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - PASS_GONE_COUNT=$(grep -o "PASS-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_GONE_COUNT=$(grep -o "FAIL-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - DISCOVERY_COUNT=$(grep -o "DISCOVERY REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - - # Add category summaries (≤5 show paths, >5 show count + refer to file) - if [[ "$PASS_FAIL_COUNT" -gt 0 ]]; then - if [[ "$PASS_FAIL_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Fail ($PASS_FAIL_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-FAIL REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_FAIL_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Fail:** $PASS_FAIL_COUNT tests (see attached file)") - fi - fi - - if [[ "$PASS_SKIP_COUNT" -gt 0 ]]; then - if [[ "$PASS_SKIP_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Skip ($PASS_SKIP_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-SKIP REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_SKIP_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Skip:** $PASS_SKIP_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$PASS_GONE_COUNT" -gt 0 ]]; then - if [[ "$PASS_GONE_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Gone ($PASS_GONE_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_GONE_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Gone:** $PASS_GONE_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_GONE_COUNT" -gt 0 ]]; then - if [[ "$FAIL_GONE_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Fail→Gone ($FAIL_GONE_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_GONE_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Fail→Gone:** $FAIL_GONE_COUNT tests (see attached file)") - fi - fi - - if [[ "$DISCOVERY_COUNT" -gt 0 ]]; then - if [[ "$DISCOVERY_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Discovery Warnings ($DISCOVERY_COUNT):**") - MESSAGE_LINES+=("• $DISCOVERY_COUNT new warnings (see attached file)") - else - MESSAGE_LINES+=("**Discovery Warnings:** $DISCOVERY_COUNT warnings (see attached file)") - fi - fi - - if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then - if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)") - fi - fi - - else - # Fallback to simple regression count - MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.") - fi - elif [[ "$COMPARE_RESULT" == "failure" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - # This case handles general comparison failures NOT due to specific regressions - MESSAGE_LINES+=("**:warning: TEST RESULTS DECLINED**") - MESSAGE_LINES+=(" - The PR branch shows a decrease in test success compared to the target branch, but no specific regressions were identified by the \`meta-regression-analysis\` job.") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - elif [[ "$COMPARE_RESULT" == "success" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - MESSAGE_LINES+=("**:white_check_mark: NO REGRESSIONS DETECTED**") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - fi - - if [[ "$HAS_REGRESSIONS" != "true" ]] && [ -f "comprehensive_regression_report.txt" ]; then - FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 || "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 || "$NEW_TESTS_COUNT" -gt 0 ]]; then - MESSAGE_LINES+=("**:sparkles: Improvements & Additions**") - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then - if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)") - fi - fi - fi - fi - - MESSAGE_LINES+=("---") - MESSAGE_LINES+=("[View Workflow Run](${ACTION_RUN_URL})") - - # Set artifact path - always prefer comprehensive report if it exists - if [ -f "comprehensive_regression_report.txt" ]; then - ARTIFACT_PATH_OUTPUT="comprehensive_regression_report.txt" - elif [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then - ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH" - else - ARTIFACT_PATH_OUTPUT="" - fi - - # Construct with actual newlines - FINAL_MESSAGE_BODY=$(printf "%s\\n" "${MESSAGE_LINES[@]}") - if [ ${#MESSAGE_LINES[@]} -gt 0 ]; then - # Remove the very last actual newline - FINAL_MESSAGE_BODY="${FINAL_MESSAGE_BODY%\\n}" - fi - - echo "Final message body prepared in test-py-unittest.yml" - - echo "message_body_out<> $GITHUB_OUTPUT - echo "$FINAL_MESSAGE_BODY" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - echo "artifact_path_out=$ARTIFACT_PATH_OUTPUT" >> $GITHUB_OUTPUT - - notify-discord: - name: Send Discord Notification - needs: [prepare-notification] - if: | - always() && - needs.prepare-notification.outputs.should_notify == 'true' && - needs.prepare-notification.outputs.webhook_available_for_alert == 'true' - uses: ./.github/workflows/alert-discord.yml - with: - message_body: ${{ needs.prepare-notification.outputs.message_body }} - ping_user_ids: ${{ needs.prepare-notification.outputs.ping_user_ids }} - artifact_paths: ${{ needs.prepare-notification.outputs.artifact_path }} - should_notify: ${{ needs.prepare-notification.outputs.should_notify }} - runs_on: ${{ inputs.runs_on }} - secrets: - DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} - DISCORD_USER_MAP: ${{ secrets.DISCORD_USER_MAP }} diff --git a/.github/workflows/test-rs-cargo.yml b/.github/workflows/test-rs-cargo.yml index 084046a..b525b91 100644 --- a/.github/workflows/test-rs-cargo.yml +++ b/.github/workflows/test-rs-cargo.yml @@ -167,79 +167,9 @@ jobs: run: | set -euo pipefail - cgroup_auto_workers() { - local n="" - - # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - - # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print 
int((q+p-1)/p)}')" - fi - fi - - # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi + # Source shared worker calculation script + source "$GITHUB_WORKSPACE/.github/scripts/cgroup_workers.sh" + WORKERS=$(determine_workers "${{ inputs.parallel_workers }}" '${{ inputs.runs_on }}') echo "Running tests with $WORKERS threads..." diff --git a/.github/workflows/test-storybook.yml b/.github/workflows/test-storybook.yml deleted file mode 100644 index 2de12bf..0000000 --- a/.github/workflows/test-storybook.yml +++ /dev/null @@ -1,650 +0,0 @@ -name: Reusable Storybook Tests with Regression Analysis - -on: - workflow_call: - inputs: - node-version: - description: "Node.js version to use for Storybook tests" - required: false - type: string - default: "18.x" - storybook_port: - description: "Port for Storybook server" - required: false - type: string - default: "6006" - target_branch_to_compare: - description: "The target branch to compare against for regressions (e.g., main). If empty, regression check is skipped." - required: false - type: string - default: "" - runs_on: - description: "Runner label for the test jobs." - required: false - type: string - default: '["self-hosted", "multithreaded"]' - parallel_workers: - description: "Number of parallel workers for Storybook tests. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or a number." - required: false - type: string - default: "" - storybook_start_command: - description: "Command to start Storybook server." - required: false - type: string - default: "npm run storybook" - storybook_test_command: - description: "Command to run Storybook tests." - required: false - type: string - default: "npm run storybook-test" - outputs: - pr_has_errors: - description: "Boolean indicating if the PR branch has Storybook test errors." - value: ${{ jobs.test-pr-branch-storybook.outputs.has_errors }} - pr_failing_stories_json: - description: "JSON list of failing story IDs on the PR branch." - value: ${{ jobs.test-pr-branch-storybook.outputs.failing_items_json }} - has_regressions: - description: "Boolean indicating if Storybook test regressions were found." - value: ${{ jobs.compare-results.outputs.has_regressions }} - regression_count: - description: "Number of Storybook test regressions found." 
- value: ${{ jobs.compare-results.outputs.regression_count }} - -jobs: - test-target-branch-storybook: - if: ${{ inputs.target_branch_to_compare != '' }} - name: Test Target Branch Stories - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - total: ${{ steps.results.outputs.total }} - passed: ${{ steps.results.outputs.passed }} - percentage: ${{ steps.results.outputs.percentage }} - passing_stories_json: ${{ steps.results.outputs.passing_items_json }} - collection_errors: ${{ steps.results.outputs.collection_errors }} - no_tests_found: ${{ steps.results.outputs.no_tests_found }} - has_errors: ${{ steps.results.outputs.has_errors }} - error_type: ${{ steps.results.outputs.error_type }} - failing_count: ${{ steps.results.outputs.failing_count }} - error_count: ${{ steps.results.outputs.error_count }} - skipped_count: ${{ steps.results.outputs.skipped_count }} - xfailed_count: ${{ steps.results.outputs.xfailed_count }} - - steps: - # Smart caching for target branch results - - name: Set cache keys - id: cache-keys - run: | - CACHE_VERSION="v1" - BASE_KEY="storybook-${CACHE_VERSION}-${{ inputs.target_branch_to_compare }}-${{ github.event.pull_request.base.sha || github.sha }}" - echo "base_key=$BASE_KEY" >> $GITHUB_OUTPUT - echo "pending_key=${BASE_KEY}-pending-${{ github.run_id }}" >> $GITHUB_OUTPUT - echo "Cache base key: $BASE_KEY" - - - name: Check for complete cache - id: cache-complete - uses: actions/cache/restore@v4 - with: - path: cached_target - key: ${{ steps.cache-keys.outputs.base_key }} - - - name: Check for pending cache - id: cache-pending - if: steps.cache-complete.outputs.cache-hit != 'true' - uses: actions/cache/restore@v4 - with: - path: cached_pending - key: ${{ steps.cache-keys.outputs.base_key }}-pending-impossible-match - restore-keys: | - ${{ steps.cache-keys.outputs.base_key }}-pending- - - - name: Determine initial status - id: initial-status - run: | - if [ "${{ steps.cache-complete.outputs.cache-hit }}" == "true" ]; then - echo "status=complete" >> $GITHUB_OUTPUT - echo "Found complete cache - will use it" - elif [ "${{ steps.cache-pending.outputs.cache-hit }}" == "true" ]; then - echo "status=pending" >> $GITHUB_OUTPUT - echo "Found pending cache - another job is running, will poll" - else - echo "status=miss" >> $GITHUB_OUTPUT - echo "No cache found - will run tests" - fi - - - name: Create pending marker - if: steps.initial-status.outputs.status == 'miss' - run: | - mkdir -p cached_pending_marker - echo "pending" > cached_pending_marker/status - echo "started=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> cached_pending_marker/status - echo "run_id=${{ github.run_id }}" >> cached_pending_marker/status - - - name: Save pending marker - if: steps.initial-status.outputs.status == 'miss' - uses: actions/cache/save@v4 - with: - path: cached_pending_marker - key: ${{ steps.cache-keys.outputs.pending_key }} - - - name: Poll for complete cache - id: poll-cache - if: steps.initial-status.outputs.status == 'pending' - env: - GH_TOKEN: ${{ github.token }} - run: | - echo "Another job is running tests, polling for results..." - TOTAL_WAIT=0 - MAX_WAIT=1200 - DELAY=5 - CACHE_KEY="${{ steps.cache-keys.outputs.base_key }}" - - while [ $TOTAL_WAIT -lt $MAX_WAIT ]; do - echo "Waiting ${DELAY}s... (${TOTAL_WAIT}s / ${MAX_WAIT}s elapsed)" - sleep $DELAY - TOTAL_WAIT=$((TOTAL_WAIT + DELAY)) - - CACHE_CHECK=$(gh cache list --key "$CACHE_KEY" --limit 1 2>/dev/null || echo "") - if echo "$CACHE_CHECK" | grep -q "$CACHE_KEY"; then - echo "Complete cache is now available!" 
- echo "found=true" >> $GITHUB_OUTPUT - break - fi - - DELAY=$((DELAY * 2)) - if [ $DELAY -gt 60 ]; then - DELAY=60 - fi - done - - if [ $TOTAL_WAIT -ge $MAX_WAIT ]; then - echo "Timeout after ${MAX_WAIT}s - will run tests ourselves" - echo "found=false" >> $GITHUB_OUTPUT - fi - - - name: Restore cache after poll - id: cache-after-poll - if: steps.poll-cache.outputs.found == 'true' - uses: actions/cache/restore@v4 - with: - path: cached_target - key: ${{ steps.cache-keys.outputs.base_key }} - - - name: Determine final status - id: final-status - run: | - if [ "${{ steps.cache-complete.outputs.cache-hit }}" == "true" ]; then - echo "cache_hit=true" >> $GITHUB_OUTPUT - elif [ "${{ steps.cache-after-poll.outputs.cache-hit }}" == "true" ]; then - echo "cache_hit=true" >> $GITHUB_OUTPUT - else - echo "cache_hit=false" >> $GITHUB_OUTPUT - fi - - - name: Load cached results - id: load-cache - if: steps.final-status.outputs.cache_hit == 'true' - run: | - echo "Loading cached target results" - if [ -f cached_target/outputs.env ]; then - cat cached_target/outputs.env >> $GITHUB_OUTPUT - fi - - - name: Upload cached artifact - if: steps.final-status.outputs.cache_hit == 'true' - uses: actions/upload-artifact@v4 - with: - name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: cached_target/test_data.json - if-no-files-found: ignore - - # === Only run tests if no usable cache === - - name: Checkout Target Branch - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/checkout@v4.2.2 - with: - ref: ${{ inputs.target_branch_to_compare }} - submodules: "recursive" - - - name: Use Node.js ${{ inputs.node-version }} - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/setup-node@v4 - with: - node-version: ${{ inputs.node-version }} - cache: "npm" - - - name: Install dependencies (Target) - if: steps.final-status.outputs.cache_hit != 'true' - run: npm ci - - - name: Install Playwright browsers (Target) - if: steps.final-status.outputs.cache_hit != 'true' - run: npx playwright install --with-deps - - - name: Run Storybook (Target) - if: steps.final-status.outputs.cache_hit != 'true' - run: ${{ inputs.storybook_start_command }} -- --port ${{ inputs.storybook_port }} & - - - name: Wait for Storybook (Target) - if: steps.final-status.outputs.cache_hit != 'true' - run: | - echo "Waiting for Storybook (Target) to start on port ${{ inputs.storybook_port }}..." - timeout=120 - counter=0 - until $(curl --output /dev/null --silent --head --fail http://localhost:${{ inputs.storybook_port }}); do - if [ $counter -ge $timeout ]; then - echo "Timed out waiting for Storybook (Target) to start" - exit 1 - fi - echo "Waiting for Storybook (Target)... ($counter seconds so far)" - sleep 5 - counter=$((counter + 5)) - done - echo "Storybook (Target) is up and running on port ${{ inputs.storybook_port }}!" 
- - - name: Run Storybook tests (Target) - id: run-tests-target - if: steps.final-status.outputs.cache_hit != 'true' - run: | - set -euo pipefail - - cgroup_auto_workers() { - local n="" - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi - - echo "Running Storybook tests with $WORKERS workers..." - - WORKER_FLAGS="" - if [ "$WORKERS" != "1" ]; then - WORKER_FLAGS="--maxWorkers=$WORKERS" - fi - - set +e - ${{ inputs.storybook_test_command }} -- --url http://localhost:${{ inputs.storybook_port }} $WORKER_FLAGS --json --outputFile=target_storybook_results.json 2>&1 | tee test_output.txt - TEST_EXIT=$? - set -e - - echo "Storybook tests on target branch completed (exit code: $TEST_EXIT)." - if [ ! 
-f target_storybook_results.json ]; then - echo '{"testResults": [], "numTotalTests": 0, "numPassedTests": 0}' > target_storybook_results.json - fi - - - name: Normalise Storybook results (Target) - id: normalise-target - if: steps.final-status.outputs.cache_hit != 'true' - run: | - python3 "$GITHUB_WORKSPACE/.github/scripts/storybook_results_to_standard_json.py" \ - target_storybook_results.json \ - target_test_data.json \ - --github-output "$GITHUB_OUTPUT" - - - name: Save results to cache - if: steps.final-status.outputs.cache_hit != 'true' - run: | - mkdir -p cached_target - [ -f target_test_data.json ] && cp target_test_data.json cached_target/test_data.json - [ -f target_storybook_results.json ] && cp target_storybook_results.json cached_target/ - echo "complete" > cached_target/status - cat > cached_target/outputs.env << EOF - total=${{ steps.normalise-target.outputs.total || '0' }} - passed=${{ steps.normalise-target.outputs.passed || '0' }} - percentage=${{ steps.normalise-target.outputs.percentage || '0.00' }} - collection_errors=${{ steps.normalise-target.outputs.collection_errors || 'false' }} - no_tests_found=${{ steps.normalise-target.outputs.no_tests_found || 'false' }} - has_errors=${{ steps.normalise-target.outputs.has_failures || 'false' }} - error_type=none - failing_count=${{ steps.normalise-target.outputs.failed || '0' }} - error_count=0 - skipped_count=${{ steps.normalise-target.outputs.skipped || '0' }} - xfailed_count=${{ steps.normalise-target.outputs.xfailed || '0' }} - passing_items_json=${{ steps.normalise-target.outputs.passing_items_json || '[]' }} - EOF - sed -i 's/^[[:space:]]*//' cached_target/outputs.env - - - name: Upload to cache - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/cache/save@v4 - with: - path: cached_target - key: ${{ steps.cache-keys.outputs.base_key }} - - - name: Upload target branch artifacts - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/upload-artifact@v4 - with: - name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: | - target_test_data.json - target_storybook_results.json - retention-days: 3 - if-no-files-found: ignore - - - name: Set final outputs - id: results - run: | - if [ "${{ steps.final-status.outputs.cache_hit }}" == "true" ]; then - echo "total=${{ steps.load-cache.outputs.total || '0' }}" >> $GITHUB_OUTPUT - echo "passed=${{ steps.load-cache.outputs.passed || '0' }}" >> $GITHUB_OUTPUT - echo "percentage=${{ steps.load-cache.outputs.percentage || '0.00' }}" >> $GITHUB_OUTPUT - echo "collection_errors=${{ steps.load-cache.outputs.collection_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "no_tests_found=${{ steps.load-cache.outputs.no_tests_found || 'false' }}" >> $GITHUB_OUTPUT - echo "has_errors=${{ steps.load-cache.outputs.has_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "error_type=${{ steps.load-cache.outputs.error_type || 'none' }}" >> $GITHUB_OUTPUT - echo "failing_count=${{ steps.load-cache.outputs.failing_count || '0' }}" >> $GITHUB_OUTPUT - echo "error_count=${{ steps.load-cache.outputs.error_count || '0' }}" >> $GITHUB_OUTPUT - echo "skipped_count=${{ steps.load-cache.outputs.skipped_count || '0' }}" >> $GITHUB_OUTPUT - echo "xfailed_count=${{ steps.load-cache.outputs.xfailed_count || '0' }}" >> $GITHUB_OUTPUT - echo "passing_items_json=${{ steps.load-cache.outputs.passing_items_json || '[]' }}" >> $GITHUB_OUTPUT - else - echo "total=${{ steps.normalise-target.outputs.total || '0' }}" >> $GITHUB_OUTPUT - echo "passed=${{ 
steps.normalise-target.outputs.passed || '0' }}" >> $GITHUB_OUTPUT - echo "percentage=${{ steps.normalise-target.outputs.percentage || '0.00' }}" >> $GITHUB_OUTPUT - echo "collection_errors=${{ steps.normalise-target.outputs.collection_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "no_tests_found=${{ steps.normalise-target.outputs.no_tests_found || 'false' }}" >> $GITHUB_OUTPUT - echo "has_errors=${{ steps.normalise-target.outputs.has_failures || 'false' }}" >> $GITHUB_OUTPUT - echo "error_type=none" >> $GITHUB_OUTPUT - echo "failing_count=${{ steps.normalise-target.outputs.failed || '0' }}" >> $GITHUB_OUTPUT - echo "error_count=0" >> $GITHUB_OUTPUT - echo "skipped_count=${{ steps.normalise-target.outputs.skipped || '0' }}" >> $GITHUB_OUTPUT - echo "xfailed_count=${{ steps.normalise-target.outputs.xfailed || '0' }}" >> $GITHUB_OUTPUT - echo "passing_items_json=${{ steps.normalise-target.outputs.passing_items_json || '[]' }}" >> $GITHUB_OUTPUT - fi - - test-pr-branch-storybook: - name: Test PR Branch Stories - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - has_errors: ${{ steps.run-tests-pr.outcome == 'failure' || steps.normalise-pr.outputs.has_failures == 'true' }} - failing_items_json: ${{ steps.normalise-pr.outputs.failing_items_json }} - total: ${{ steps.normalise-pr.outputs.total }} - passed: ${{ steps.normalise-pr.outputs.passed }} - percentage: ${{ steps.normalise-pr.outputs.percentage }} - collection_errors: ${{ steps.normalise-pr.outputs.collection_errors }} - no_tests_found: ${{ steps.normalise-pr.outputs.no_tests_found }} - failing_count: ${{ steps.normalise-pr.outputs.failed }} - error_count: ${{ steps.normalise-pr.outputs.error_count || '0' }} - skipped_count: ${{ steps.normalise-pr.outputs.skipped }} - xfailed_count: ${{ steps.normalise-pr.outputs.xfailed }} - - steps: - - name: Checkout Repository (PR) - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - - - name: Use Node.js ${{ inputs.node-version }} - uses: actions/setup-node@v4 - with: - node-version: ${{ inputs.node-version }} - cache: "npm" - - - name: Install dependencies (PR) - run: npm ci - - - name: Install Playwright browsers (PR) - run: npx playwright install --with-deps - - - name: Run Storybook (PR) - run: ${{ inputs.storybook_start_command }} -- --port ${{ inputs.storybook_port }} & - - - name: Wait for Storybook (PR) - run: | - echo "Waiting for Storybook (PR) to start on port ${{ inputs.storybook_port }}..." - timeout=120 - counter=0 - until $(curl --output /dev/null --silent --head --fail http://localhost:${{ inputs.storybook_port }}); do - if [ $counter -ge $timeout ]; then - echo "Timed out waiting for Storybook (PR) to start" - exit 1 - fi - echo "Waiting for Storybook (PR)... ($counter seconds so far)" - sleep 5 - counter=$((counter + 5)) - done - echo "Storybook (PR) is up and running on port ${{ inputs.storybook_port }}!" 
- - - name: Run Storybook tests (PR) - id: run-tests-pr - run: | - set -euo pipefail - - cgroup_auto_workers() { - local n="" - if [ -f /sys/fs/cgroup/cpu.max ]; then - local quota period - quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" - period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" - if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then - local quota period - quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" - period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" - if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then - n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" - fi - fi - if [ -z "$n" ]; then - local f="" - if [ -f /sys/fs/cgroup/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset.cpus" - elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then - f="/sys/fs/cgroup/cpuset/cpuset.cpus" - fi - if [ -n "$f" ]; then - local spec - spec="$(cat "$f" | tr -d '[:space:]')" - if [ -n "$spec" ]; then - local count=0 - IFS=',' read -r -a parts <<< "$spec" - for p in "${parts[@]}"; do - if [[ "$p" == *-* ]]; then - local a="${p%%-*}" - local b="${p##*-}" - if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then - count=$((count + b - a + 1)) - fi - elif [[ "$p" =~ ^[0-9]+$ ]]; then - count=$((count + 1)) - fi - done - if [ "$count" -gt 0 ]; then - n="$count" - fi - fi - fi - fi - if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then - n="1" - fi - echo "$n" - } - - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - elif [ "$WORKERS" = "auto" ]; then - WORKERS="$(cgroup_auto_workers)" - fi - - echo "Running Storybook tests with $WORKERS workers..." - - WORKER_FLAGS="" - if [ "$WORKERS" != "1" ]; then - WORKER_FLAGS="--maxWorkers=$WORKERS" - fi - - set +e - ${{ inputs.storybook_test_command }} -- --url http://localhost:${{ inputs.storybook_port }} $WORKER_FLAGS --json --outputFile=pr_storybook_results.json 2>&1 | tee test_output.txt - TEST_EXIT=$? - set -e - - echo "Storybook tests on PR branch completed (exit code: $TEST_EXIT)." - if [ ! 
-f pr_storybook_results.json ]; then
-            echo '{"testResults": [], "numTotalTests": 0, "numPassedTests": 0, "numFailedTests": 0}' > pr_storybook_results.json
-          fi
-
-      - name: Normalise Storybook results (PR)
-        id: normalise-pr
-        run: |
-          python3 "$GITHUB_WORKSPACE/.github/scripts/storybook_results_to_standard_json.py" \
-            pr_storybook_results.json \
-            pr_test_data.json \
-            --github-output "$GITHUB_OUTPUT"
-
-      - name: Upload PR branch artifacts
-        if: always()
-        uses: actions/upload-artifact@v4
-        with:
-          name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }}
-          path: |
-            pr_test_data.json
-            pr_storybook_results.json
-            ./*-snapshots/
-            ./coverage/
-          retention-days: 7
-          if-no-files-found: ignore
-
-  compare-results:
-    if: ${{ inputs.target_branch_to_compare != '' }}
-    needs: [test-target-branch-storybook, test-pr-branch-storybook]
-    uses: ./.github/workflows/regression-test.yml
-    with:
-      runs_on: ${{ inputs.runs_on }}
-      baseline_label: ${{ inputs.target_branch_to_compare }}
-      baseline_results_artifact: target_branch_data_${{ github.event.pull_request.number || github.run_id }}
-      baseline_results_filename: target_test_data.json
-      current_label: ${{ github.head_ref || github.ref_name || 'source branch' }}
-      current_results_artifact: pr_branch_data_${{ github.event.pull_request.number || github.run_id }}
-      current_results_filename: pr_test_data.json
-      baseline_passed: ${{ needs.test-target-branch-storybook.outputs.passed || '0' }}
-      baseline_total: ${{ needs.test-target-branch-storybook.outputs.total || '0' }}
-      baseline_percentage: ${{ needs.test-target-branch-storybook.outputs.percentage || '0' }}
-      current_passed: ${{ needs.test-pr-branch-storybook.outputs.passed || '0' }}
-      current_total: ${{ needs.test-pr-branch-storybook.outputs.total || '0' }}
-      current_percentage: ${{ needs.test-pr-branch-storybook.outputs.percentage || '0' }}
-      baseline_collection_errors: ${{ needs.test-target-branch-storybook.outputs.collection_errors || 'false' }}
-      baseline_no_tests_found: ${{ needs.test-target-branch-storybook.outputs.no_tests_found || 'false' }}
-      current_collection_errors: ${{ needs.test-pr-branch-storybook.outputs.collection_errors || 'false' }}
-      current_no_tests_found: ${{ needs.test-pr-branch-storybook.outputs.no_tests_found || 'false' }}
-      artifact_name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_storybook
-
-  perform-regression-analysis:
-    if: ${{ inputs.target_branch_to_compare != '' }}
-    needs: [test-target-branch-storybook, test-pr-branch-storybook]
-    uses: ./.github/workflows/meta-regression-analysis.yml
-    with:
-      item_type_singular: "Storybook story"
-      item_type_plural: "Storybook stories"
-      pr_number: ${{ github.event.pull_request.number }}
-      run_id: ${{ github.run_id }}
-      target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }}
-      pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }}
-
-  check-storybook-results:
-    name: Check Storybook Results & Regressions
-    runs-on: ${{ fromJSON(inputs.runs_on) }}
-    needs: [test-pr-branch-storybook, compare-results, perform-regression-analysis]
-    if: always()
-    steps:
-      - name: Evaluate Storybook Test Results
-        run: |
-          PR_HAS_ERRORS="${{ needs.test-pr-branch-storybook.outputs.has_errors }}"
-          REGRESSION_ANALYSIS_INTENDED="${{ inputs.target_branch_to_compare != '' }}"
-          HAS_REGRESSIONS="false"
-          REGRESSION_COUNT="0"
-
-          echo "--- Storybook Test Results ---"
-          echo "PR Branch Storybook Test Errors: $PR_HAS_ERRORS"
-          echo "Target Branch for Comparison: ${{ inputs.target_branch_to_compare }}"
-
-          if [[ "$REGRESSION_ANALYSIS_INTENDED" == "true" ]]; then
-            if [[ "${{ needs.compare-results.result }}" != "skipped" ]]; then
-              HAS_REGRESSIONS="${{ needs.compare-results.outputs.has_regressions }}"
-              REGRESSION_COUNT="${{ needs.compare-results.outputs.regression_count }}"
-              echo "Storybook Regressions Found: $HAS_REGRESSIONS ($REGRESSION_COUNT)"
-            else
-              echo "Storybook regression analysis job was skipped."
-            fi
-
-            if [[ "$HAS_REGRESSIONS" == "true" ]]; then
-              echo "::error::${REGRESSION_COUNT} Storybook test(s) regressed. Stories that were passing on target branch ('${{ inputs.target_branch_to_compare }}') are now failing/broken on the PR branch."
-              exit 1
-            fi
-          fi
-
-          if [[ "$PR_HAS_ERRORS" == "true" ]]; then
-            echo "::error::Storybook tests failed on the PR branch. Check artifacts for details."
-            exit 1
-          fi
-
-          echo "Storybook tests passed and no new regressions detected (if applicable)."
-          echo "--- End Storybook Test Results ---"