diff --git a/.github/workflows/run-branch-test.yml b/.github/workflows/run-branch-test.yml index 5b12b10..736be89 100644 --- a/.github/workflows/run-branch-test.yml +++ b/.github/workflows/run-branch-test.yml @@ -13,6 +13,18 @@ on: required: false type: string default: "3.10" + # Node.js options for Jest/Mocha + node-version: + description: "Node.js version for Jest/Mocha." + required: false + type: string + default: "18" + # Rust/cargo options + rust-version: + description: "Rust toolchain version for cargo test." + required: false + type: string + default: "stable" # C++/CMake options cmake-version: description: "CMake version for C++ tests." @@ -51,7 +63,7 @@ on: type: string default: '["self-hosted", "multithreaded"]' parallel_workers: - description: "Number of parallel workers. Leave empty for auto-detect (6 for multithreaded, 1 for singlethreaded). Use 'auto' for CPU count." + description: "Number of parallel workers. Leave empty for the runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or pass a number." required: false type: string default: "" @@ -60,6 +72,33 @@ on: required: false type: boolean default: false + # Jest options + jest-command: + description: "Base command used to invoke Jest." + required: false + type: string + default: "npx jest" + jest-extra-args: + description: "Additional arguments to pass to Jest." + required: false + type: string + default: "" + # Mocha options + mocha-command: + description: "Base command used to invoke Mocha." + required: false + type: string + default: "npx mocha" + mocha-extra-args: + description: "Additional arguments to pass to Mocha." + required: false + type: string + default: "" + working-directory: + description: "Directory where JS test commands should be executed." + required: false + type: string + default: "." 
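For orientation, a repository adopting this reusable workflow would wire the new inputs up roughly as in the sketch below. Only the workflow path and the input/secret names come from this diff; the caller's name, trigger, branch filter, and the concrete values are illustrative assumptions.

name: Branch regression tests
on:
  pull_request:
    branches: [main]

jobs:
  branch-test:
    uses: ./.github/workflows/run-branch-test.yml
    with:
      target_branch: ${{ github.base_ref }}
      node-version: "20"
      rust-version: "stable"
      jest-extra-args: "--ci"
      parallel_workers: "auto"
    secrets: inherit # forwards DISCORD_WEBHOOK_URL when it is configured

With parallel_workers set to "auto", the runner steps resolve the worker count from cgroup CPU limits (see the cgroup_auto_workers helper added to the Jest runner further down) rather than from the raw host CPU count.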
secrets: DISCORD_WEBHOOK_URL: required: false @@ -67,17 +106,23 @@ on: required: false outputs: has_regressions: - description: "Whether regressions were detected (pytest)" - value: ${{ jobs.compare.outputs.has_regressions }} + description: "Whether regressions were detected (any framework)" + value: ${{ jobs.aggregate-results.outputs.has_regressions }} regression_count: - description: "Number of regressions (pytest)" - value: ${{ jobs.compare.outputs.regression_count }} + description: "Total number of regressions (all frameworks)" + value: ${{ jobs.aggregate-results.outputs.regression_count }} + has_regressions_cargo: + description: "Whether regressions were detected (cargo)" + value: ${{ jobs.cargo-compare.outputs.has_regressions }} + regression_count_cargo: + description: "Number of regressions (cargo)" + value: ${{ jobs.cargo-compare.outputs.regression_count }} cpp_has_regressions: description: "Whether regressions were detected (C++)" - value: ${{ jobs.compare-cpp.outputs.has_regressions }} + value: ${{ jobs.cpp-compare.outputs.has_regressions }} cpp_regression_count: description: "Number of regressions (C++)" - value: ${{ jobs.compare-cpp.outputs.regression_count }} + value: ${{ jobs.cpp-compare.outputs.regression_count }} jobs: # Detect which test frameworks are present @@ -85,6 +130,9 @@ jobs: runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: has_pytest: ${{ steps.detect.outputs.has_pytest }} + has_jest: ${{ steps.detect.outputs.has_jest }} + has_mocha: ${{ steps.detect.outputs.has_mocha }} + has_cargo: ${{ steps.detect.outputs.has_cargo }} has_cpp: ${{ steps.detect.outputs.has_cpp }} steps: - uses: actions/checkout@v4.2.2 @@ -92,11 +140,19 @@ jobs: id: detect run: | # Detect pytest - if [ -f "pyproject.toml" ] || [ -f "setup.py" ] || [ -f "requirements.txt" ] || find . -name "test_*.py" -o -name "*_test.py" | head -1 | grep -q .; then - echo "has_pytest=true" >> $GITHUB_OUTPUT + if [ -f "pyproject.toml" ] || [ -f "setup.py" ] || [ -f "requirements.txt" ] || find . 
-name "test_*.py" -o -name "*_test.py" 2>/dev/null | head -1 | grep -q .; then + echo "has_pytest=true" >> "$GITHUB_OUTPUT" echo "✅ Detected: pytest" else - echo "has_pytest=false" >> $GITHUB_OUTPUT + echo "has_pytest=false" >> "$GITHUB_OUTPUT" + fi + + # Detect cargo (Rust) + if [ -f "Cargo.toml" ]; then + echo "has_cargo=true" >> "$GITHUB_OUTPUT" + echo "✅ Detected: cargo (Rust)" + else + echo "has_cargo=false" >> "$GITHUB_OUTPUT" fi # Detect C++ with CMake and tests @@ -116,10 +172,53 @@ jobs: echo "✅ Detected: C++ test files" fi fi - echo "has_cpp=$HAS_CPP" >> $GITHUB_OUTPUT + echo "has_cpp=$HAS_CPP" >> "$GITHUB_OUTPUT" + + # Detect Jest + HAS_JEST="false" + if [ -f "package.json" ]; then + # Check for jest in dependencies or devDependencies + if grep -q '"jest"' package.json 2>/dev/null; then + HAS_JEST="true" + fi + # Check for jest config files + if [ -f "jest.config.js" ] || [ -f "jest.config.ts" ] || [ -f "jest.config.mjs" ] || [ -f "jest.config.cjs" ] || [ -f "jest.config.json" ]; then + HAS_JEST="true" + fi + # Check for jest section in package.json + if grep -q '"jest":' package.json 2>/dev/null; then + HAS_JEST="true" + fi + fi + echo "has_jest=$HAS_JEST" >> "$GITHUB_OUTPUT" + if [ "$HAS_JEST" = "true" ]; then + echo "✅ Detected: Jest" + fi + # Detect Mocha + HAS_MOCHA="false" + if [ -f "package.json" ]; then + # Check for mocha in dependencies or devDependencies + if grep -q '"mocha"' package.json 2>/dev/null; then + HAS_MOCHA="true" + fi + # Check for mocha config files + if [ -f ".mocharc.js" ] || [ -f ".mocharc.json" ] || [ -f ".mocharc.yml" ] || [ -f ".mocharc.yaml" ] || [ -f ".mocharc.cjs" ] || [ -f ".mocharc.mjs" ]; then + HAS_MOCHA="true" + fi + # Check for mocha section in package.json + if grep -q '"mocha":' package.json 2>/dev/null; then + HAS_MOCHA="true" + fi + fi + echo "has_mocha=$HAS_MOCHA" >> "$GITHUB_OUTPUT" + if [ "$HAS_MOCHA" = "true" ]; then + echo "✅ Detected: Mocha" + fi + + # ==================== PYTEST ==================== # Test source branch (always fresh, no caching) - test-source: + pytest-source: needs: detect-frameworks if: needs.detect-frameworks.outputs.has_pytest == 'true' uses: ./.github/workflows/test-py-pytest.yml @@ -130,500 +229,22 @@ jobs: artifact_name: pytest_source_${{ github.event.pull_request.number || github.run_id }} parallel_workers: ${{ inputs.parallel_workers }} - # Test target branch with smart caching - # Cache is shared across PRs targeting same branch+SHA - # First PR to run populates cache, subsequent PRs use it if available - test-target: + # Test target branch for pytest + pytest-target: needs: detect-frameworks if: needs.detect-frameworks.outputs.has_pytest == 'true' - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - total: ${{ steps.results.outputs.total }} - passed: ${{ steps.results.outputs.passed }} - percentage: ${{ steps.results.outputs.percentage }} - collection_errors: ${{ steps.results.outputs.collection_errors }} - no_tests_found: ${{ steps.results.outputs.no_tests_found }} - has_errors: ${{ steps.results.outputs.has_errors }} - error_type: ${{ steps.results.outputs.error_type }} - failing_count: ${{ steps.results.outputs.failing_count }} - error_count: ${{ steps.results.outputs.error_count }} - skipped_count: ${{ steps.results.outputs.skipped_count }} - xfailed_count: ${{ steps.results.outputs.xfailed_count }} - xpassed_count: ${{ steps.results.outputs.xpassed_count }} - - steps: - # Define cache keys - - name: Set cache keys - id: cache-keys - if: inputs.use_target_cache - run: | - # Version bump forces 
cache invalidation when extraction logic changes - CACHE_VERSION="v4" - BASE_KEY="pytest-${CACHE_VERSION}-${{ inputs.target_branch }}-${{ github.event.pull_request.base.sha || github.sha }}" - echo "base_key=$BASE_KEY" >> $GITHUB_OUTPUT - echo "pending_key=${BASE_KEY}-pending-${{ github.run_id }}" >> $GITHUB_OUTPUT - echo "🔍 Cache base key: $BASE_KEY" - - # Try to restore complete results first - - name: Check for complete cache - id: cache-complete - if: inputs.use_target_cache - uses: actions/cache/restore@v4 - with: - path: cached_target - key: ${{ steps.cache-keys.outputs.base_key }} - - # If no complete cache, check for any pending cache (someone else is running) - - name: Check for pending cache - id: cache-pending - if: inputs.use_target_cache && steps.cache-complete.outputs.cache-hit != 'true' - uses: actions/cache/restore@v4 - with: - path: cached_pending - key: ${{ steps.cache-keys.outputs.base_key }}-pending-impossible-match - restore-keys: | - ${{ steps.cache-keys.outputs.base_key }}-pending- - - - name: Determine initial status - id: initial-status - run: | - if [ "${{ inputs.use_target_cache }}" != "true" ]; then - echo "status=disabled" >> $GITHUB_OUTPUT - echo "🔄 Cache disabled - will run fresh tests" - elif [ "${{ steps.cache-complete.outputs.cache-hit }}" == "true" ]; then - echo "status=complete" >> $GITHUB_OUTPUT - echo "✅ Found complete cache - will use it" - elif [ "${{ steps.cache-pending.outputs.cache-hit }}" == "true" ]; then - echo "status=pending" >> $GITHUB_OUTPUT - echo "⏳ Found pending cache - another job is running, will poll" - else - echo "status=miss" >> $GITHUB_OUTPUT - echo "📭 No cache found - will run tests" - fi - - # If cache miss, immediately save a pending marker so others know to wait - - name: Create pending marker - if: inputs.use_target_cache && steps.initial-status.outputs.status == 'miss' - run: | - mkdir -p cached_pending_marker - echo "pending" > cached_pending_marker/status - echo "started=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> cached_pending_marker/status - echo "run_id=${{ github.run_id }}" >> cached_pending_marker/status - - - name: Save pending marker - if: inputs.use_target_cache && steps.initial-status.outputs.status == 'miss' - uses: actions/cache/save@v4 - with: - path: cached_pending_marker - key: ${{ steps.cache-keys.outputs.pending_key }} - - # If pending found, poll for complete cache with exponential backoff - - name: Poll for complete cache - id: poll-cache - if: inputs.use_target_cache && steps.initial-status.outputs.status == 'pending' - env: - GH_TOKEN: ${{ github.token }} - run: | - echo "⏳ Another job is running tests, polling for results..." - TOTAL_WAIT=0 - MAX_WAIT=1200 # 20 minutes - DELAY=5 - CACHE_KEY="${{ steps.cache-keys.outputs.base_key }}" - - while [ $TOTAL_WAIT -lt $MAX_WAIT ]; do - echo "⏳ Waiting ${DELAY}s... (${TOTAL_WAIT}s / ${MAX_WAIT}s elapsed)" - sleep $DELAY - TOTAL_WAIT=$((TOTAL_WAIT + DELAY)) - - # Check if complete cache exists now using GitHub API - CACHE_CHECK=$(gh cache list --key "$CACHE_KEY" --limit 1 2>/dev/null || echo "") - if echo "$CACHE_CHECK" | grep -q "$CACHE_KEY"; then - echo "✅ Complete cache is now available!" - echo "found=true" >> $GITHUB_OUTPUT - break - fi - - # Exponential backoff: 5, 10, 20, 40, 60, 60... 
- DELAY=$((DELAY * 2)) - if [ $DELAY -gt 60 ]; then - DELAY=60 - fi - done - - if [ $TOTAL_WAIT -ge $MAX_WAIT ]; then - echo "⏰ Timeout after ${MAX_WAIT}s - will run tests ourselves" - echo "found=false" >> $GITHUB_OUTPUT - fi - - # Restore complete cache after polling found it - - name: Restore cache after poll - id: cache-after-poll - if: inputs.use_target_cache && steps.poll-cache.outputs.found == 'true' - uses: actions/cache/restore@v4 - with: - path: cached_target - key: ${{ steps.cache-keys.outputs.base_key }} - - - name: Determine final status - id: final-status - run: | - if [ "${{ inputs.use_target_cache }}" != "true" ]; then - echo "cache_hit=false" >> $GITHUB_OUTPUT - echo "🔄 Cache disabled - running fresh tests" - elif [ "${{ steps.cache-complete.outputs.cache-hit }}" == "true" ]; then - echo "cache_hit=true" >> $GITHUB_OUTPUT - echo "✅ Using complete cache (found immediately)" - elif [ "${{ steps.cache-after-poll.outputs.cache-hit }}" == "true" ]; then - echo "cache_hit=true" >> $GITHUB_OUTPUT - echo "✅ Using complete cache (found after polling)" - else - echo "cache_hit=false" >> $GITHUB_OUTPUT - echo "🧪 Will run tests" - fi - - - name: Load cached results - id: load-cache - if: inputs.use_target_cache && steps.final-status.outputs.cache_hit == 'true' - run: | - echo "✅ Loading cached target results (skipping test run)" - if [ -f cached_target/outputs.env ]; then - cat cached_target/outputs.env >> $GITHUB_OUTPUT - fi - - - name: Upload cached artifact - if: inputs.use_target_cache && steps.final-status.outputs.cache_hit == 'true' - uses: actions/upload-artifact@v4 - with: - name: pytest_target_${{ github.event.pull_request.number || github.run_id }} - path: cached_target/test_data.json - if-no-files-found: ignore - - # === Only run tests if no usable cache === - - name: Checkout - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - ref: ${{ inputs.target_branch }} - - - name: Set up Python - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/setup-python@v5.3.0 - with: - python-version: "${{ inputs.python-version }}" - - - name: Install dependencies - if: steps.final-status.outputs.cache_hit != 'true' - run: | - echo "📦 Installing dependencies..." - python -m pip install --upgrade pip - pip install pytest pytest-json-report pytest-asyncio pytest-xdist - PYPROJECT=$(find . -name "pyproject.toml" -type f | head -n 1) - if [ -n "$PYPROJECT" ]; then - pip install -e "$(dirname "$PYPROJECT")[dev]" - elif [ -f requirements.txt ]; then - pip install -r requirements.txt - fi - - - name: Check for test collection errors - id: check-collection - if: steps.final-status.outputs.cache_hit != 'true' - run: | - echo "🔍 Running pytest collection check..." 
- python -m pytest --collect-only -v > collection_output.txt 2>&1 || true - - HAS_COLLECTION_ERRORS="false" - NO_TESTS_FOUND="false" - ERROR_TYPE="none" - - if grep -q "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt; then - echo "::error::Test discovery errors detected" - if grep -q "ImportError" collection_output.txt; then - ERROR_TYPE="ImportError" - elif grep -q "ModuleNotFoundError" collection_output.txt; then - ERROR_TYPE="ModuleNotFoundError" - elif grep -q "SyntaxError" collection_output.txt; then - ERROR_TYPE="SyntaxError" - elif grep -q "ERROR collecting" collection_output.txt; then - ERROR_TYPE="CollectionError" - elif grep -q "Interrupted:" collection_output.txt; then - ERROR_TYPE="Interrupted" - else - ERROR_TYPE="UnknownError" - fi - HAS_COLLECTION_ERRORS="true" - else - TEST_COUNT=$(grep -o "collected [0-9]* item" collection_output.txt | grep -o "[0-9]*" || echo "0") - if [[ "$TEST_COUNT" == "0" ]]; then - echo "::warning::No tests were found" - NO_TESTS_FOUND="true" - ERROR_TYPE="NoTestsFound" - else - echo "✅ Found $TEST_COUNT tests" - fi - fi - - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - - name: Run tests - id: run-tests - continue-on-error: true - if: | - steps.final-status.outputs.cache_hit != 'true' && - steps.check-collection.outputs.has_collection_errors != 'true' - run: | - # Determine worker count based on input or runner type - WORKERS="${{ inputs.parallel_workers }}" - if [ -z "$WORKERS" ]; then - # Auto-detect based on runner type - if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then - WORKERS="6" - else - WORKERS="1" - fi - fi - echo "🧪 Running tests with $WORKERS workers..." - - PARALLEL_FLAG="" - if [ "$WORKERS" != "1" ]; then - PARALLEL_FLAG="-n $WORKERS" - fi - - # Run pytest and capture exit code - # Use quiet mode for console (-q) - full details in JSON report - set +e - python -m pytest -q $PARALLEL_FLAG --json-report --json-report-file=results.json --tb=line 2>&1 | tee test_output.txt - PYTEST_EXIT=$? - set -e - - echo "pytest_exit_code=$PYTEST_EXIT" >> $GITHUB_OUTPUT - - if [ $PYTEST_EXIT -eq 137 ]; then - echo "::warning::Tests were killed (exit 137) - likely OOM. Partial results may be available." 
- fi - - if [ -f results.json ]; then - echo "✅ Test execution completed (exit code: $PYTEST_EXIT)" - else - echo "❌ No results.json - creating empty results file" - echo '{"exitcode": '$PYTEST_EXIT', "summary": {"total": 0, "passed": 0}, "tests": []}' > results.json - fi - - - name: Extract test results - id: extract-results - if: steps.final-status.outputs.cache_hit != 'true' - run: | - python3 -c " - import json - import os - - total = passed = 0 - percentage = 0.0 - passing_tests = [] - failing_tests = [] - error_tests = [] - skipped_tests = [] - xfailed_tests = [] - xpassed_tests = [] - all_tests = [] - skipped_with_reasons = {} - xfailed_with_reasons = {} - warnings_list = [] - - try: - with open('results.json') as f: - results = json.load(f) - - # Extract results regardless of exit code - we want to capture - # whatever tests ran, even if pytest had errors - if 'summary' in results: - summary = results['summary'] - total = summary.get('total', 0) - passed = summary.get('passed', 0) - - for test in results.get('tests', []): - outcome = test.get('outcome') - nodeid = test.get('nodeid', '') - if not nodeid: - continue - all_tests.append(nodeid) - if outcome == 'passed': - passing_tests.append(nodeid) - elif outcome == 'failed': - failing_tests.append(nodeid) - elif outcome == 'error': - error_tests.append(nodeid) - elif outcome == 'skipped': - skipped_tests.append(nodeid) - reason = test.get('longrepr', 'No reason') - if isinstance(reason, list): - reason = reason[0] if reason else 'No reason' - skipped_with_reasons[nodeid] = str(reason).strip() - elif outcome == 'xfailed': - xfailed_tests.append(nodeid) - reason = test.get('longrepr', 'No reason') - if isinstance(reason, list): - reason = reason[0] if reason else 'No reason' - xfailed_with_reasons[nodeid] = str(reason).strip() - elif outcome == 'xpassed': - xpassed_tests.append(nodeid) - - percentage = (passed / total * 100) if total > 0 else 0 - except FileNotFoundError: - print('Results file not found') - except Exception as e: - print(f'Error: {e}') - - # Extract warnings - try: - with open('test_output.txt') as f: - content = f.read() - if 'warnings summary' in content: - section = content.split('warnings summary')[1].split('-- Docs:')[0] if '-- Docs:' in content else content.split('warnings summary')[1] - current = [] - for line in section.split('\n'): - line = line.rstrip() - if not line or line.startswith('='): - continue - if not line.startswith(' ') and ('.py:' in line or 'warning' in line.lower()): - if current: - warnings_list.append('\n'.join(current)) - current = [line] - elif line.startswith(' ') and current: - current.append(line) - if current: - warnings_list.append('\n'.join(current)) - except: - pass - - # Save artifact data - with open('test_data.json', 'w') as f: - json.dump({ - 'passing_tests': passing_tests, - 'failing_tests': failing_tests, - 'error_tests': error_tests, - 'skipped_tests': skipped_tests, - 'xfailed_tests': xfailed_tests, - 'xpassed_tests': xpassed_tests, - 'all_tests': all_tests, - 'skipped_tests_with_reasons': skipped_with_reasons, - 'xfailed_tests_with_reasons': xfailed_with_reasons, - 'warnings': warnings_list - }, f, indent=2) - - print(f'Results: {passed}/{total} ({percentage:.1f}%)') - - with open(os.environ['GITHUB_OUTPUT'], 'a') as f: - f.write(f'total={total}\n') - f.write(f'passed={passed}\n') - f.write(f'percentage={percentage:.2f}\n') - f.write(f'failing_count={len(failing_tests)}\n') - f.write(f'error_count={len(error_tests)}\n') - f.write(f'skipped_count={len(skipped_tests)}\n') - 
f.write(f'xfailed_count={len(xfailed_tests)}\n') - f.write(f'xpassed_count={len(xpassed_tests)}\n') - " - - - name: Save results to cache - if: inputs.use_target_cache && steps.final-status.outputs.cache_hit != 'true' - run: | - echo "💾 Saving results to cache..." - mkdir -p cached_target - - # Copy test artifacts to cache directory - [ -f test_data.json ] && cp test_data.json cached_target/ - [ -f test_output.txt ] && cp test_output.txt cached_target/ - [ -f results.json ] && cp results.json cached_target/ - - # Mark cache as complete (not pending) - echo "complete" > cached_target/status - echo "completed=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> cached_target/status - echo "run_id=${{ github.run_id }}" >> cached_target/status - - # Save outputs for future cache loads (no leading spaces!) - cat > cached_target/outputs.env << 'EOF' - total=${{ steps.extract-results.outputs.total || '0' }} - passed=${{ steps.extract-results.outputs.passed || '0' }} - percentage=${{ steps.extract-results.outputs.percentage || '0.00' }} - collection_errors=${{ steps.check-collection.outputs.has_collection_errors || 'false' }} - no_tests_found=${{ steps.check-collection.outputs.no_tests_found || 'false' }} - has_errors=${{ steps.check-collection.outputs.has_errors || 'false' }} - error_type=${{ steps.check-collection.outputs.error_type || 'none' }} - failing_count=${{ steps.extract-results.outputs.failing_count || '0' }} - error_count=${{ steps.extract-results.outputs.error_count || '0' }} - skipped_count=${{ steps.extract-results.outputs.skipped_count || '0' }} - xfailed_count=${{ steps.extract-results.outputs.xfailed_count || '0' }} - xpassed_count=${{ steps.extract-results.outputs.xpassed_count || '0' }} - EOF - # Remove leading whitespace from the env file - sed -i 's/^[[:space:]]*//' cached_target/outputs.env - - # Save complete results so other PRs can find it - - name: Upload to cache - if: inputs.use_target_cache && steps.final-status.outputs.cache_hit != 'true' - uses: actions/cache/save@v4 - with: - path: cached_target - key: ${{ steps.cache-keys.outputs.base_key }} - - - name: Upload test artifacts - if: steps.final-status.outputs.cache_hit != 'true' - uses: actions/upload-artifact@v4 - with: - name: pytest_target_${{ github.event.pull_request.number || github.run_id }} - path: | - test_data.json - test_output.txt - results.json - retention-days: 3 - if-no-files-found: ignore - - # Consolidate outputs from cache or fresh run - - name: Set final outputs - id: results - run: | - if [ "${{ steps.final-status.outputs.cache_hit }}" == "true" ]; then - echo "📋 Using cached results" - # Outputs already set by load-cache step, copy them - echo "total=${{ steps.load-cache.outputs.total || '0' }}" >> $GITHUB_OUTPUT - echo "passed=${{ steps.load-cache.outputs.passed || '0' }}" >> $GITHUB_OUTPUT - echo "percentage=${{ steps.load-cache.outputs.percentage || '0.00' }}" >> $GITHUB_OUTPUT - echo "collection_errors=${{ steps.load-cache.outputs.collection_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "no_tests_found=${{ steps.load-cache.outputs.no_tests_found || 'false' }}" >> $GITHUB_OUTPUT - echo "has_errors=${{ steps.load-cache.outputs.has_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "error_type=${{ steps.load-cache.outputs.error_type || 'none' }}" >> $GITHUB_OUTPUT - echo "failing_count=${{ steps.load-cache.outputs.failing_count || '0' }}" >> $GITHUB_OUTPUT - echo "error_count=${{ steps.load-cache.outputs.error_count || '0' }}" >> $GITHUB_OUTPUT - echo "skipped_count=${{ steps.load-cache.outputs.skipped_count || '0' 
}}" >> $GITHUB_OUTPUT - echo "xfailed_count=${{ steps.load-cache.outputs.xfailed_count || '0' }}" >> $GITHUB_OUTPUT - echo "xpassed_count=${{ steps.load-cache.outputs.xpassed_count || '0' }}" >> $GITHUB_OUTPUT - else - echo "📋 Using fresh results" - echo "total=${{ steps.extract-results.outputs.total || '0' }}" >> $GITHUB_OUTPUT - echo "passed=${{ steps.extract-results.outputs.passed || '0' }}" >> $GITHUB_OUTPUT - echo "percentage=${{ steps.extract-results.outputs.percentage || '0.00' }}" >> $GITHUB_OUTPUT - echo "collection_errors=${{ steps.check-collection.outputs.has_collection_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "no_tests_found=${{ steps.check-collection.outputs.no_tests_found || 'false' }}" >> $GITHUB_OUTPUT - echo "has_errors=${{ steps.check-collection.outputs.has_errors || 'false' }}" >> $GITHUB_OUTPUT - echo "error_type=${{ steps.check-collection.outputs.error_type || 'none' }}" >> $GITHUB_OUTPUT - echo "failing_count=${{ steps.extract-results.outputs.failing_count || '0' }}" >> $GITHUB_OUTPUT - echo "error_count=${{ steps.extract-results.outputs.error_count || '0' }}" >> $GITHUB_OUTPUT - echo "skipped_count=${{ steps.extract-results.outputs.skipped_count || '0' }}" >> $GITHUB_OUTPUT - echo "xfailed_count=${{ steps.extract-results.outputs.xfailed_count || '0' }}" >> $GITHUB_OUTPUT - echo "xpassed_count=${{ steps.extract-results.outputs.xpassed_count || '0' }}" >> $GITHUB_OUTPUT - fi + uses: ./.github/workflows/test-py-pytest.yml + with: + ref: ${{ inputs.target_branch }} + python-version: ${{ inputs.python-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: pytest_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} - # Compare results - compare: - needs: [test-source, test-target] - if: always() && needs.test-source.result == 'success' + # Compare pytest results + pytest-compare: + needs: [detect-frameworks, pytest-source, pytest-target] + if: always() && needs.detect-frameworks.outputs.has_pytest == 'true' && needs.pytest-source.result == 'success' uses: ./.github/workflows/regression-test.yml with: runs_on: ${{ inputs.runs_on }} @@ -633,53 +254,183 @@ jobs: current_label: ${{ github.head_ref || github.ref_name }} current_results_artifact: pytest_source_${{ github.event.pull_request.number || github.run_id }} current_results_filename: test_data.json - baseline_passed: ${{ needs.test-target.outputs.passed }} - baseline_total: ${{ needs.test-target.outputs.total }} - baseline_percentage: ${{ needs.test-target.outputs.percentage }} - current_passed: ${{ needs.test-source.outputs.passed }} - current_total: ${{ needs.test-source.outputs.total }} - current_percentage: ${{ needs.test-source.outputs.percentage }} - baseline_collection_errors: ${{ needs.test-target.outputs.collection_errors }} - baseline_no_tests_found: ${{ needs.test-target.outputs.no_tests_found }} - current_collection_errors: ${{ needs.test-source.outputs.collection_errors }} - current_no_tests_found: ${{ needs.test-source.outputs.no_tests_found }} + baseline_passed: ${{ needs.pytest-target.outputs.passed }} + baseline_total: ${{ needs.pytest-target.outputs.total }} + baseline_percentage: ${{ needs.pytest-target.outputs.percentage }} + current_passed: ${{ needs.pytest-source.outputs.passed }} + current_total: ${{ needs.pytest-source.outputs.total }} + current_percentage: ${{ needs.pytest-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.pytest-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ 
needs.pytest-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.pytest-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.pytest-source.outputs.no_tests_found }} artifact_name: regression_pytest_${{ github.event.pull_request.number || github.run_id }} - # Notify on regressions - notify: - needs: [test-source, test-target, compare] - if: | - always() && - (needs.compare.outputs.has_regressions == 'true' || needs.compare.result == 'failure') - runs-on: ${{ fromJSON(inputs.runs_on) }} - steps: - - name: Send notification - env: - WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_URL }} - run: | - if [ -z "$WEBHOOK" ]; then - echo "No Discord webhook configured, skipping notification" - exit 0 - fi + # ==================== JEST ==================== + # Test source branch with Jest + jest-source: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_jest == 'true' + uses: ./.github/workflows/test-js-jest.yml + with: + ref: "" # Default checkout = PR branch + node-version: ${{ inputs.node-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: jest_source_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + jest-command: ${{ inputs.jest-command }} + jest-extra-args: ${{ inputs.jest-extra-args }} + working-directory: ${{ inputs.working-directory }} - MSG="**Pytest Regression Alert**\n" - MSG+="PR #${{ github.event.pull_request.number }}: ${{ github.event.pull_request.title }}\n" - MSG+="\`${{ github.head_ref }}\` → \`${{ inputs.target_branch }}\`\n\n" - MSG+="Source: ${{ needs.test-source.outputs.passed }}/${{ needs.test-source.outputs.total }}\n" - MSG+="Target: ${{ needs.test-target.outputs.passed }}/${{ needs.test-target.outputs.total }}\n" - MSG+="Regressions: ${{ needs.compare.outputs.regression_count || '?' 
}}\n\n" - MSG+="[View Run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" + # Test target branch with Jest + jest-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_jest == 'true' + uses: ./.github/workflows/test-js-jest.yml + with: + ref: ${{ inputs.target_branch }} + node-version: ${{ inputs.node-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: jest_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + jest-command: ${{ inputs.jest-command }} + jest-extra-args: ${{ inputs.jest-extra-args }} + working-directory: ${{ inputs.working-directory }} + + # Compare Jest results + jest-compare: + needs: [detect-frameworks, jest-source, jest-target] + if: always() && needs.detect-frameworks.outputs.has_jest == 'true' && needs.jest-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + baseline_results_artifact: jest_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: jest_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.jest-target.outputs.passed }} + baseline_total: ${{ needs.jest-target.outputs.total }} + baseline_percentage: ${{ needs.jest-target.outputs.percentage }} + current_passed: ${{ needs.jest-source.outputs.passed }} + current_total: ${{ needs.jest-source.outputs.total }} + current_percentage: ${{ needs.jest-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.jest-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.jest-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.jest-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.jest-source.outputs.no_tests_found }} + artifact_name: regression_jest_${{ github.event.pull_request.number || github.run_id }} + + # ==================== MOCHA ==================== + # Test source branch with Mocha + mocha-source: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_mocha == 'true' + uses: ./.github/workflows/test-js-mocha.yml + with: + ref: "" # Default checkout = PR branch + node-version: ${{ inputs.node-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: mocha_source_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + mocha-command: ${{ inputs.mocha-command }} + mocha-extra-args: ${{ inputs.mocha-extra-args }} + working-directory: ${{ inputs.working-directory }} - curl -s -H "Content-Type: application/json" \ - -d "{\"content\": \"$(echo -e "$MSG")\"}" \ - "$WEBHOOK" || true + # Test target branch with Mocha + mocha-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_mocha == 'true' + uses: ./.github/workflows/test-js-mocha.yml + with: + ref: ${{ inputs.target_branch }} + node-version: ${{ inputs.node-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: mocha_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + mocha-command: ${{ inputs.mocha-command }} + mocha-extra-args: ${{ inputs.mocha-extra-args }} + working-directory: ${{ inputs.working-directory }} + + # Compare Mocha results + mocha-compare: + needs: 
[detect-frameworks, mocha-source, mocha-target] + if: always() && needs.detect-frameworks.outputs.has_mocha == 'true' && needs.mocha-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + baseline_results_artifact: mocha_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: mocha_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.mocha-target.outputs.passed }} + baseline_total: ${{ needs.mocha-target.outputs.total }} + baseline_percentage: ${{ needs.mocha-target.outputs.percentage }} + current_passed: ${{ needs.mocha-source.outputs.passed }} + current_total: ${{ needs.mocha-source.outputs.total }} + current_percentage: ${{ needs.mocha-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.mocha-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.mocha-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.mocha-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.mocha-source.outputs.no_tests_found }} + artifact_name: regression_mocha_${{ github.event.pull_request.number || github.run_id }} + + # ==================== RUST/CARGO ==================== + # Test source branch with cargo + cargo-source: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_cargo == 'true' + uses: ./.github/workflows/test-rs-cargo.yml + with: + ref: "" # Default checkout = PR branch + rust-version: ${{ inputs.rust-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: cargo_source_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} - # ============================================ - # C++ Tests (GTest/CTest) - # ============================================ + # Test target branch with cargo + cargo-target: + needs: detect-frameworks + if: needs.detect-frameworks.outputs.has_cargo == 'true' + uses: ./.github/workflows/test-rs-cargo.yml + with: + ref: ${{ inputs.target_branch }} + rust-version: ${{ inputs.rust-version }} + runs_on: ${{ inputs.runs_on }} + artifact_name: cargo_target_${{ github.event.pull_request.number || github.run_id }} + parallel_workers: ${{ inputs.parallel_workers }} + # Compare cargo results + cargo-compare: + needs: [detect-frameworks, cargo-source, cargo-target] + if: always() && needs.detect-frameworks.outputs.has_cargo == 'true' && needs.cargo-source.result == 'success' + uses: ./.github/workflows/regression-test.yml + with: + runs_on: ${{ inputs.runs_on }} + baseline_label: ${{ inputs.target_branch }} + baseline_results_artifact: cargo_target_${{ github.event.pull_request.number || github.run_id }} + baseline_results_filename: test_data.json + current_label: ${{ github.head_ref || github.ref_name }} + current_results_artifact: cargo_source_${{ github.event.pull_request.number || github.run_id }} + current_results_filename: test_data.json + baseline_passed: ${{ needs.cargo-target.outputs.passed }} + baseline_total: ${{ needs.cargo-target.outputs.total }} + baseline_percentage: ${{ needs.cargo-target.outputs.percentage }} + current_passed: ${{ needs.cargo-source.outputs.passed }} + current_total: ${{ needs.cargo-source.outputs.total }} + current_percentage: ${{ needs.cargo-source.outputs.percentage }} + 
baseline_collection_errors: ${{ needs.cargo-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.cargo-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.cargo-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.cargo-source.outputs.no_tests_found }} + artifact_name: regression_cargo_${{ github.event.pull_request.number || github.run_id }} + + # ==================== C++ (GTest/CTest) ==================== # Test C++ source branch - test-source-cpp: + cpp-source: needs: detect-frameworks if: needs.detect-frameworks.outputs.has_cpp == 'true' uses: ./.github/workflows/test-cpp-gtest.yml @@ -696,7 +447,7 @@ jobs: parallel_workers: ${{ inputs.parallel_workers }} # Test C++ target branch - test-target-cpp: + cpp-target: needs: detect-frameworks if: needs.detect-frameworks.outputs.has_cpp == 'true' uses: ./.github/workflows/test-cpp-gtest.yml @@ -713,9 +464,9 @@ jobs: parallel_workers: ${{ inputs.parallel_workers }} # Compare C++ results - compare-cpp: - needs: [test-source-cpp, test-target-cpp] - if: always() && needs.test-source-cpp.result == 'success' + cpp-compare: + needs: [detect-frameworks, cpp-source, cpp-target] + if: always() && needs.detect-frameworks.outputs.has_cpp == 'true' && needs.cpp-source.result == 'success' uses: ./.github/workflows/regression-test.yml with: runs_on: ${{ inputs.runs_on }} @@ -725,24 +476,91 @@ jobs: current_label: ${{ github.head_ref || github.ref_name }} current_results_artifact: cpp_source_${{ github.event.pull_request.number || github.run_id }} current_results_filename: test_data.json - baseline_passed: ${{ needs.test-target-cpp.outputs.passed }} - baseline_total: ${{ needs.test-target-cpp.outputs.total }} - baseline_percentage: ${{ needs.test-target-cpp.outputs.percentage }} - current_passed: ${{ needs.test-source-cpp.outputs.passed }} - current_total: ${{ needs.test-source-cpp.outputs.total }} - current_percentage: ${{ needs.test-source-cpp.outputs.percentage }} - baseline_collection_errors: ${{ needs.test-target-cpp.outputs.collection_errors }} - baseline_no_tests_found: ${{ needs.test-target-cpp.outputs.no_tests_found }} - current_collection_errors: ${{ needs.test-source-cpp.outputs.collection_errors }} - current_no_tests_found: ${{ needs.test-source-cpp.outputs.no_tests_found }} + baseline_passed: ${{ needs.cpp-target.outputs.passed }} + baseline_total: ${{ needs.cpp-target.outputs.total }} + baseline_percentage: ${{ needs.cpp-target.outputs.percentage }} + current_passed: ${{ needs.cpp-source.outputs.passed }} + current_total: ${{ needs.cpp-source.outputs.total }} + current_percentage: ${{ needs.cpp-source.outputs.percentage }} + baseline_collection_errors: ${{ needs.cpp-target.outputs.collection_errors }} + baseline_no_tests_found: ${{ needs.cpp-target.outputs.no_tests_found }} + current_collection_errors: ${{ needs.cpp-source.outputs.collection_errors }} + current_no_tests_found: ${{ needs.cpp-source.outputs.no_tests_found }} artifact_name: regression_cpp_${{ github.event.pull_request.number || github.run_id }} - # Notify on C++ regressions - notify-cpp: - needs: [test-source-cpp, test-target-cpp, compare-cpp] - if: | - always() && - (needs.compare-cpp.outputs.has_regressions == 'true' || needs.compare-cpp.result == 'failure') + # ==================== AGGREGATE RESULTS ==================== + aggregate-results: + needs: [detect-frameworks, pytest-compare, jest-compare, mocha-compare, cargo-compare, cpp-compare] + if: always() + runs-on: ${{ fromJSON(inputs.runs_on) }} + outputs: + 
has_regressions: ${{ steps.aggregate.outputs.has_regressions }} + regression_count: ${{ steps.aggregate.outputs.regression_count }} + steps: + - name: Aggregate regression results + id: aggregate + run: | + TOTAL_REGRESSIONS=0 + HAS_REGRESSIONS="false" + + # Check pytest + if [ "${{ needs.detect-frameworks.outputs.has_pytest }}" == "true" ]; then + PYTEST_REGRESSIONS="${{ needs.pytest-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.pytest-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + PYTEST_REGRESSIONS)) + echo "Pytest regressions: $PYTEST_REGRESSIONS" + fi + fi + + # Check Jest + if [ "${{ needs.detect-frameworks.outputs.has_jest }}" == "true" ]; then + JEST_REGRESSIONS="${{ needs.jest-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.jest-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + JEST_REGRESSIONS)) + echo "Jest regressions: $JEST_REGRESSIONS" + fi + fi + + # Check Mocha + if [ "${{ needs.detect-frameworks.outputs.has_mocha }}" == "true" ]; then + MOCHA_REGRESSIONS="${{ needs.mocha-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.mocha-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + MOCHA_REGRESSIONS)) + echo "Mocha regressions: $MOCHA_REGRESSIONS" + fi + fi + + # Check Cargo + if [ "${{ needs.detect-frameworks.outputs.has_cargo }}" == "true" ]; then + CARGO_REGRESSIONS="${{ needs.cargo-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.cargo-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + CARGO_REGRESSIONS)) + echo "Cargo regressions: $CARGO_REGRESSIONS" + fi + fi + + # Check C++ + if [ "${{ needs.detect-frameworks.outputs.has_cpp }}" == "true" ]; then + CPP_REGRESSIONS="${{ needs.cpp-compare.outputs.regression_count || '0' }}" + if [ "${{ needs.cpp-compare.outputs.has_regressions }}" == "true" ]; then + HAS_REGRESSIONS="true" + TOTAL_REGRESSIONS=$((TOTAL_REGRESSIONS + CPP_REGRESSIONS)) + echo "C++ regressions: $CPP_REGRESSIONS" + fi + fi + + echo "has_regressions=$HAS_REGRESSIONS" >> "$GITHUB_OUTPUT" + echo "regression_count=$TOTAL_REGRESSIONS" >> "$GITHUB_OUTPUT" + echo "Total regressions across all frameworks: $TOTAL_REGRESSIONS" + + # ==================== NOTIFICATIONS ==================== + notify: + needs: [detect-frameworks, pytest-source, pytest-target, pytest-compare, jest-source, jest-target, jest-compare, mocha-source, mocha-target, mocha-compare, cargo-source, cargo-target, cargo-compare, cpp-source, cpp-target, cpp-compare, aggregate-results] + if: always() && needs.aggregate-results.outputs.has_regressions == 'true' runs-on: ${{ fromJSON(inputs.runs_on) }} steps: - name: Send notification @@ -754,12 +572,66 @@ jobs: exit 0 fi - MSG="**C++ Test Regression Alert**\n" + MSG="**Test Regression Alert**\n" MSG+="PR #${{ github.event.pull_request.number }}: ${{ github.event.pull_request.title }}\n" MSG+="\`${{ github.head_ref }}\` → \`${{ inputs.target_branch }}\`\n\n" - MSG+="Source: ${{ needs.test-source-cpp.outputs.passed }}/${{ needs.test-source-cpp.outputs.total }}\n" - MSG+="Target: ${{ needs.test-target-cpp.outputs.passed }}/${{ needs.test-target-cpp.outputs.total }}\n" - MSG+="Regressions: ${{ needs.compare-cpp.outputs.regression_count || '?' 
}}\n\n" + + # Pytest results + if [ "${{ needs.detect-frameworks.outputs.has_pytest }}" == "true" ]; then + MSG+="**Pytest:**\n" + MSG+=" Source: ${{ needs.pytest-source.outputs.passed }}/${{ needs.pytest-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.pytest-target.outputs.passed }}/${{ needs.pytest-target.outputs.total }}\n" + if [ "${{ needs.pytest-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.pytest-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # Jest results + if [ "${{ needs.detect-frameworks.outputs.has_jest }}" == "true" ]; then + MSG+="**Jest:**\n" + MSG+=" Source: ${{ needs.jest-source.outputs.passed }}/${{ needs.jest-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.jest-target.outputs.passed }}/${{ needs.jest-target.outputs.total }}\n" + if [ "${{ needs.jest-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.jest-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # Mocha results + if [ "${{ needs.detect-frameworks.outputs.has_mocha }}" == "true" ]; then + MSG+="**Mocha:**\n" + MSG+=" Source: ${{ needs.mocha-source.outputs.passed }}/${{ needs.mocha-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.mocha-target.outputs.passed }}/${{ needs.mocha-target.outputs.total }}\n" + if [ "${{ needs.mocha-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.mocha-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # Cargo results + if [ "${{ needs.detect-frameworks.outputs.has_cargo }}" == "true" ]; then + MSG+="**Cargo (Rust):**\n" + MSG+=" Source: ${{ needs.cargo-source.outputs.passed }}/${{ needs.cargo-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.cargo-target.outputs.passed }}/${{ needs.cargo-target.outputs.total }}\n" + if [ "${{ needs.cargo-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.cargo-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + # C++ results + if [ "${{ needs.detect-frameworks.outputs.has_cpp }}" == "true" ]; then + MSG+="**C++:**\n" + MSG+=" Source: ${{ needs.cpp-source.outputs.passed }}/${{ needs.cpp-source.outputs.total }}\n" + MSG+=" Target: ${{ needs.cpp-target.outputs.passed }}/${{ needs.cpp-target.outputs.total }}\n" + if [ "${{ needs.cpp-compare.outputs.has_regressions }}" == "true" ]; then + MSG+=" Regressions: ${{ needs.cpp-compare.outputs.regression_count }}\n" + fi + MSG+="\n" + fi + + MSG+="Total Regressions: ${{ needs.aggregate-results.outputs.regression_count }}\n\n" MSG+="[View Run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" curl -s -H "Content-Type: application/json" \ diff --git a/.github/workflows/test-js-jest.yml b/.github/workflows/test-js-jest.yml index 71f3f60..c25294a 100644 --- a/.github/workflows/test-js-jest.yml +++ b/.github/workflows/test-js-jest.yml @@ -1,17 +1,32 @@ -name: Reusable Compare Jest Results +name: Reusable Jest Runner on: workflow_call: inputs: - target_branch_to_compare: - description: "The target branch to compare against (e.g., main, refs/heads/main)." - required: true + ref: + description: "Git ref to checkout and test. Leave empty for default checkout." + required: false type: string + default: "" node-version: description: "Node.js version to use for testing." required: false type: string default: "18" + runs_on: + description: "Runner label for the test job." 
+ required: false + type: string + default: '["self-hosted", "multithreaded"]' + artifact_name: + description: "Name for the test results artifact." + required: true + type: string + parallel_workers: + description: "Number of parallel workers for Jest. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or a number." + required: false + type: string + default: "" install-command: description: "Optional command to install dependencies (defaults to npm/pnpm/yarn auto-detection)." required: false @@ -26,62 +41,49 @@ on: description: "Additional arguments to pass to the Jest command (applied before workflow-managed flags)." required: false type: string - default: "--runInBand" + default: "" working-directory: description: "Directory where install and test commands should be executed." required: false type: string default: "." - ping_latest_committer: - description: "If true, the latest committer on the PR will be added to the ping list." - required: false - type: boolean - default: false - runs_on: - required: false - type: string - default: '["self-hosted", "multithreaded"]' - secrets: - DISCORD_WEBHOOK_URL: - description: "Discord Webhook URL for failure notifications. If not provided, notifications are skipped." - required: false - DISCORD_USER_MAP: - description: 'JSON string mapping GitHub usernames to Discord User IDs (e.g., {"user1":"id1"}). If not provided, users won''t be pinged.' - required: false outputs: - pr_total: - description: "Total tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.total }} - pr_passed: - description: "Passed tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.passed }} - pr_percentage: - description: "Pass percentage in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.percentage }} - pr_collection_errors: - description: "PR branch has collection errors" - value: ${{ jobs.test-source-branch.outputs.collection_errors }} - pr_no_tests_found: - description: "PR branch has no tests found" - value: ${{ jobs.test-source-branch.outputs.no_tests_found }} - target_total: - description: "Total tests in target branch" - value: ${{ jobs.test-target-branch.outputs.total }} - target_passed: - description: "Passed tests in target branch" - value: ${{ jobs.test-target-branch.outputs.passed }} - target_percentage: - description: "Pass percentage in target branch" - value: ${{ jobs.test-target-branch.outputs.percentage }} - has_regressions: - description: "Boolean indicating if regressions were found" - value: ${{ jobs.compare-results.outputs.has_regressions }} - regression_count: - description: "Number of test regressions found" - value: ${{ jobs.compare-results.outputs.regression_count }} + total: + description: "Total number of tests" + value: ${{ jobs.test.outputs.total }} + passed: + description: "Number of passing tests" + value: ${{ jobs.test.outputs.passed }} + percentage: + description: "Pass percentage" + value: ${{ jobs.test.outputs.percentage }} + collection_errors: + description: "Whether collection errors occurred" + value: ${{ jobs.test.outputs.collection_errors }} + no_tests_found: + description: "Whether no tests were found" + value: ${{ jobs.test.outputs.no_tests_found }} + has_errors: + description: "Whether any errors occurred" + value: ${{ jobs.test.outputs.has_errors }} + error_type: + description: "Type of error if any" + value: ${{ jobs.test.outputs.error_type }} + failing_count: + description: "Number of failing tests" + value: ${{ 
jobs.test.outputs.failing_count }} + error_count: + description: "Number of errored tests" + value: ${{ jobs.test.outputs.error_count }} + skipped_count: + description: "Number of skipped tests" + value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of xfailed tests (todo in Jest)" + value: ${{ jobs.test.outputs.xfailed_count }} jobs: - test-source-branch: + test: runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: total: ${{ steps.extract-results.outputs.total }} @@ -91,16 +93,17 @@ jobs: no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} has_errors: ${{ steps.check-collection.outputs.has_errors }} error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} failing_count: ${{ steps.extract-results.outputs.failing_count }} + error_count: ${{ steps.extract-results.outputs.error_count }} skipped_count: ${{ steps.extract-results.outputs.skipped_count }} xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} steps: - - name: Checkout PR Branch + - name: Checkout uses: actions/checkout@v4.2.2 with: submodules: "recursive" + ref: ${{ inputs.ref || github.ref }} - name: Set up Node.js uses: actions/setup-node@v4 @@ -150,7 +153,6 @@ jobs: HAS_COLLECTION_ERRORS="false" NO_TESTS_FOUND="false" ERROR_TYPE="none" - ERROR_DETAILS="none" JEST_COMMAND="${{ inputs['jest-command'] }}" JEST_EXTRA_ARGS="${{ inputs['jest-extra-args'] }}" @@ -176,417 +178,161 @@ jobs: else ERROR_TYPE="ExecutionError" fi - - ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g') - echo "::error::Test discovery errors detected in PR branch via Jest ($ERROR_TYPE)" - echo "::error::First details: ${ERROR_DETAILS:0:200}..." + echo "::error::Test discovery errors detected via Jest ($ERROR_TYPE)" else TEST_COUNT=$(grep -cv '^[[:space:]]*$' collection_output.txt || echo "0") if [ "$TEST_COUNT" = "0" ]; then NO_TESTS_FOUND="true" ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="Jest --listTests did not return any test files" - echo "::warning::No tests were found in the PR branch" + echo "::warning::No tests were found" else - echo "Found $TEST_COUNT test file(s) in PR branch" + echo "Found $TEST_COUNT test file(s)" fi fi - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - + echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> "$GITHUB_OUTPUT" + echo "no_tests_found=$NO_TESTS_FOUND" >> "$GITHUB_OUTPUT" + echo "error_type=$ERROR_TYPE" >> "$GITHUB_OUTPUT" if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - if [[ "$HAS_COLLECTION_ERRORS" == "true" ]]; then - echo "❌ Discovery Error: $ERROR_TYPE" - elif [[ "$NO_TESTS_FOUND" == "true" ]]; then - echo "⚠️ No Tests Found" + echo "has_errors=true" >> "$GITHUB_OUTPUT" else - echo "✅ Discovery Success" + echo "has_errors=false" >> "$GITHUB_OUTPUT" fi working-directory: ${{ inputs['working-directory'] }} - - name: Run tests on PR Branch + - name: Run tests + id: run-tests + continue-on-error: true if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - echo "Running Jest tests on PR branch..." 
- JEST_COMMAND="${{ inputs['jest-command'] }}" - JEST_EXTRA_ARGS="${{ inputs['jest-extra-args'] }}" - TEST_COMMAND="$JEST_COMMAND $JEST_EXTRA_ARGS --json --outputFile=pr_results.json --testLocationInResults" - echo "Executing: $TEST_COMMAND" - - set +e - eval "$TEST_COMMAND" > test_output.txt 2>&1 - EXIT_CODE=$? - set -e - - if [ -f pr_results.json ]; then - echo "✅ Test execution completed (exit code: $EXIT_CODE)" - else - echo "❌ Test execution did not produce pr_results.json (exit code: $EXIT_CODE)" - fi - working-directory: ${{ inputs['working-directory'] }} - - - name: Extract test results and create artifacts - id: extract-results - run: | - echo "PR_BRANCH=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_ENV - - node <<'NODE' - const fs = require('fs'); - const path = require('path'); - - const resultsPath = path.resolve(process.cwd(), 'pr_results.json'); - let prTotal = 0; - let prPassed = 0; - let prPercentage = 0; - const passingTests = []; - const failingTests = []; - const skippedTests = []; - const xfailedTests = []; - const skippedTestsWithReasons = {}; - const xfailedTestsWithReasons = {}; - const allTests = []; - let warningsList = []; - - const safeRead = (filePath) => { - try { - return fs.readFileSync(filePath, 'utf-8'); - } catch (error) { - return null; - } - }; - - const rawResults = safeRead(resultsPath); - - if (rawResults) { - try { - const data = JSON.parse(rawResults); - if (typeof data.numTotalTests === 'number') { - prTotal = data.numTotalTests; - } - if (typeof data.numPassedTests === 'number') { - prPassed = data.numPassedTests; - } - - const suites = Array.isArray(data.testResults) ? data.testResults : []; - - for (const suite of suites) { - const suitePath = suite.name || suite.testFilePath || ''; - const relativeSuitePath = suitePath - ? path.relative(process.cwd(), suitePath) || suitePath - : ''; - const assertions = Array.isArray(suite.assertionResults) - ? suite.assertionResults - : []; - - for (const assertion of assertions) { - const titleParts = []; - if (Array.isArray(assertion.ancestorTitles)) { - for (const ancestor of assertion.ancestorTitles) { - if (ancestor) { - titleParts.push(String(ancestor)); - } - } - } - if (assertion.title) { - titleParts.push(String(assertion.title)); - } - - const fullName = titleParts.join(' › ') || assertion.fullName || assertion.title || 'Unnamed test'; - const identifier = relativeSuitePath - ? `${relativeSuitePath}::${fullName}` - : fullName; - - allTests.push(identifier); - - switch (assertion.status) { - case 'passed': - passingTests.push(identifier); - break; - case 'failed': - failingTests.push(identifier); - break; - case 'pending': - case 'skipped': - case 'todo': - skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = - assertion.status === 'todo' - ? 'Marked as todo in Jest suite' - : 'Test marked as skipped/pending in Jest output'; - break; - default: - // Treat any other status as skipped-equivalent - skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = 'Test not executed (treated as skipped)'; - break; - } - } - } - - if (!prTotal && allTests.length) { - prTotal = allTests.length; - } - if (!prPassed && passingTests.length) { - prPassed = passingTests.length; - } - - prPercentage = prTotal > 0 ? 
(prPassed / prTotal) * 100 : 0; - - console.log(`Parsed Jest results: ${prPassed}/${prTotal} passed (${prPercentage.toFixed(2)}%)`); - console.log(`Failing tests: ${failingTests.length}`); - console.log(`Skipped tests: ${skippedTests.length}`); - console.log(`Total discovered tests: ${allTests.length}`); - } catch (error) { - console.log(`Error parsing pr_results.json: ${error.message}`); - } - } else { - console.log('pr_results.json not found. No test data to parse.'); - } + set -euo pipefail + + cgroup_auto_workers() { + local n="" + + # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " + if [ -f /sys/fs/cgroup/cpu.max ]; then + local quota period + quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" + period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" + if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi - const outputContent = safeRead(path.resolve(process.cwd(), 'test_output.txt')); - if (outputContent) { - const warnLines = outputContent - .split('\n') - .map((line) => line.trimEnd()) - .filter((line) => /\bWARN(?:ING)?\b/.test(line)); - if (warnLines.length) { - warningsList = warnLines.slice(0, 200); - console.log(`Collected ${warningsList.length} warning line(s) from Jest output.`); - } - } + # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us + if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + local quota period + quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" + period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" + if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi - const testData = { - passing_tests: passingTests, - failing_tests: failingTests, - skipped_tests: skippedTests, - xfailed_tests: xfailedTests, - all_tests: allTests, - skipped_tests_with_reasons: skippedTestsWithReasons, - xfailed_tests_with_reasons: xfailedTestsWithReasons, - warnings: warningsList, - }; + # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) + if [ -z "$n" ]; then + local f="" + if [ -f /sys/fs/cgroup/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset.cpus" + elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset/cpuset.cpus" + fi - fs.writeFileSync('pr_test_data.json', JSON.stringify(testData, null, 2)); - console.log('Wrote pr_test_data.json with standardized Jest results.'); + if [ -n "$f" ]; then + local spec + spec="$(cat "$f" | tr -d '[:space:]')" + if [ -n "$spec" ]; then + local count=0 + IFS=',' read -r -a parts <<< "$spec" + for p in "${parts[@]}"; do + if [[ "$p" == *-* ]]; then + local a="${p%%-*}" + local b="${p##*-}" + if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then + count=$((count + b - a + 1)) + fi + elif [[ "$p" =~ ^[0-9]+$ ]]; then + count=$((count + 1)) + fi + done + if [ "$count" -gt 0 ]; then + n="$count" + fi + fi + fi + fi - const percentageString = Number.isFinite(prPercentage) ? 
prPercentage.toFixed(2) : '0.00'; - const outputLines = [ - `total=${prTotal}`, - `passed=${prPassed}`, - `percentage=${percentageString}`, - `failing_count=${failingTests.length}`, - `skipped_count=${skippedTests.length}`, - `xfailed_count=${xfailedTests.length}`, - ]; + if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then + n="1" + fi - if (process.env.GITHUB_OUTPUT) { - fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`); + echo "$n" } - NODE - - echo "✅ Test results: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} passed (${{ steps.extract-results.outputs.percentage }}%)" - working-directory: ${{ inputs['working-directory'] }} - - - name: Upload PR branch artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: | - ${{ inputs['working-directory'] }}/pr_test_data.json - ${{ inputs['working-directory'] }}/test_output.txt - ${{ inputs['working-directory'] }}/pr_results.json - ${{ inputs['working-directory'] }}/collection_output.txt - retention-days: 3 - if-no-files-found: ignore - - test-target-branch: - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - total: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.total || steps.extract-results.outputs.total }} - passed: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passed || steps.extract-results.outputs.passed }} - percentage: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.percentage || steps.extract-results.outputs.percentage }} - collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }} - no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} - has_errors: ${{ steps.check-collection.outputs.has_errors }} - error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} - passing_count: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_count || steps.extract-results.outputs.passing_count }} - - steps: - - name: Checkout target branch - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - ref: ${{ inputs.target_branch_to_compare }} - - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: "${{ inputs['node-version'] }}" - - - name: Install dependencies - run: | - set -e - if command -v corepack >/dev/null 2>&1; then - corepack enable >/dev/null 2>&1 || true - fi - INSTALL_COMMAND="${{ inputs['install-command'] }}" - if [ -n "$INSTALL_COMMAND" ]; then - echo "Running custom install command: $INSTALL_COMMAND" - eval "$INSTALL_COMMAND" - elif [ -f package-lock.json ]; then - echo "Detected package-lock.json; running npm ci" - npm ci - elif [ -f yarn.lock ]; then - if command -v yarn >/dev/null 2>&1; then - echo "Detected yarn.lock; running yarn install --frozen-lockfile" - yarn install --frozen-lockfile + WORKERS="${{ inputs.parallel_workers }}" + if [ -z "$WORKERS" ]; then + if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then + WORKERS="6" else - echo "::warning::yarn.lock detected but yarn is unavailable. Falling back to npm install." 
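The cgroup_auto_workers helper added above derives the worker count from the container's CPU quota rather than the host's CPU count. Below is a minimal Node.js sketch of the same cgroup-v2 arithmetic, ceil(quota/period) read from /sys/fs/cgroup/cpu.max; it is illustrative only, assumes a Linux runner, and the fallback to os.cpus() is an assumption of the sketch, not what the workflow does (the workflow falls back to cgroup v1, then cpuset, then 1).

// cgroup-workers.js - illustrative sketch, not part of the workflow.
// Mirrors the cgroup v2 branch of cgroup_auto_workers: ceil(quota / period),
// falling back to the visible CPU count when no quota is set or the file is missing.
const fs = require('fs');
const os = require('os');

function cgroupAwareWorkers() {
  try {
    // cgroup v2 exposes either "<quota> <period>" or "max <period>" in cpu.max.
    const [quota, period] = fs
      .readFileSync('/sys/fs/cgroup/cpu.max', 'utf-8')
      .trim()
      .split(/\s+/);
    if (quota !== 'max' && Number(period) > 0) {
      return Math.max(1, Math.ceil(Number(quota) / Number(period)));
    }
  } catch (error) {
    // File not present (cgroup v1 or non-Linux); fall through to the CPU count.
  }
  return Math.max(1, os.cpus().length); // sketch-only fallback
}

console.log(cgroupAwareWorkers());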
- npm install + WORKERS="1" fi - elif [ -f pnpm-lock.yaml ]; then - if command -v pnpm >/dev/null 2>&1; then - echo "Detected pnpm-lock.yaml; running pnpm install --frozen-lockfile" - pnpm install --frozen-lockfile - else - echo "::warning::pnpm-lock.yaml detected but pnpm is unavailable. Falling back to npm install." - npm install - fi - else - echo "No lockfile detected; running npm install" - npm install + elif [ "$WORKERS" = "auto" ]; then + WORKERS="$(cgroup_auto_workers)" fi - working-directory: ${{ inputs['working-directory'] }} - - name: Check for test collection errors - id: check-collection - run: | - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_collection.log) 2>&1 - - echo "Running Jest collection check on target branch..." + echo "Running tests with $WORKERS workers..." - HAS_COLLECTION_ERRORS="false" - NO_TESTS_FOUND="false" - ERROR_TYPE="none" - ERROR_DETAILS="none" + # Build parallel flag for Jest + PARALLEL_FLAG="" + if [ "$WORKERS" = "1" ]; then + PARALLEL_FLAG="--runInBand" + else + PARALLEL_FLAG="--maxWorkers=$WORKERS" + fi JEST_COMMAND="${{ inputs['jest-command'] }}" JEST_EXTRA_ARGS="${{ inputs['jest-extra-args'] }}" - COLLECTION_COMMAND="$JEST_COMMAND $JEST_EXTRA_ARGS --listTests" - - echo "Executing: $COLLECTION_COMMAND" set +e - eval "$COLLECTION_COMMAND" > collection_output.txt 2>&1 - EXIT_CODE=$? + eval "$JEST_COMMAND $JEST_EXTRA_ARGS $PARALLEL_FLAG --json --outputFile=results.json --testLocationInResults" 2>&1 | tee test_output.txt + JEST_EXIT=$? set -e - exec 1>&3 2>&4 + echo "jest_exit_code=$JEST_EXIT" >> "$GITHUB_OUTPUT" - if [ "$EXIT_CODE" -ne 0 ]; then - HAS_COLLECTION_ERRORS="true" - if grep -qi "Cannot find module" collection_output.txt; then - ERROR_TYPE="ModuleNotFoundError" - elif grep -qi "SyntaxError" collection_output.txt; then - ERROR_TYPE="SyntaxError" - elif grep -qi "TypeError" collection_output.txt; then - ERROR_TYPE="TypeError" - elif grep -qi "ReferenceError" collection_output.txt; then - ERROR_TYPE="ReferenceError" - else - ERROR_TYPE="ExecutionError" - fi - - ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g') - echo "::warning::Test discovery errors detected in target branch via Jest ($ERROR_TYPE)" - echo "::warning::First details: ${ERROR_DETAILS:0:200}..." - else - TEST_COUNT=$(grep -cv '^[[:space:]]*$' collection_output.txt || echo "0") - if [ "$TEST_COUNT" = "0" ]; then - NO_TESTS_FOUND="true" - ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="Jest --listTests did not return any test files" - echo "::warning::No tests were found in the target branch" - else - echo "Found $TEST_COUNT test file(s) in target branch" - fi - fi - - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - - if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT + if [ "$JEST_EXIT" -eq 137 ]; then + echo "::warning::Tests were killed (exit 137) - likely OOM. Partial results may be available." fi - echo "=== FULL COLLECTION OUTPUT ===" >> debug_target_collection.log - cat collection_output.txt >> debug_target_collection.log - working-directory: ${{ inputs['working-directory'] }} - - - name: Run tests on target branch - if: steps.check-collection.outputs.has_collection_errors != 'true' - run: | - echo "Running Jest tests on target branch..." 
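The new run step maps the resolved worker count onto Jest's CLI: a single worker becomes --runInBand, anything else --maxWorkers=N, and results are captured with --json --outputFile plus --testLocationInResults. A hedged sketch of that invocation through child_process follows; the WORKERS environment variable and the direct npx jest call are assumptions for illustration, not part of the workflow.

// run-jest.js - illustrative sketch of the flag mapping used by the run step above.
const { spawnSync } = require('child_process');

function jestArgs(workers) {
  // 1 worker -> --runInBand, otherwise --maxWorkers=<n> (same rule as the step).
  const parallel = workers === 1 ? ['--runInBand'] : [`--maxWorkers=${workers}`];
  return [
    ...parallel,
    '--json',
    '--outputFile=results.json',
    '--testLocationInResults',
  ];
}

const workers = Number(process.env.WORKERS || 1); // assumption: worker count via env var
const result = spawnSync('npx', ['jest', ...jestArgs(workers)], { stdio: 'inherit' });

// The shell reports an OOM kill as exit 137; child_process surfaces it as signal SIGKILL.
if (result.status === 137 || result.signal === 'SIGKILL') {
  console.warn('Jest was killed - likely OOM; partial results may exist.');
}
process.exit(result.status ?? 1);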
- JEST_COMMAND="${{ inputs['jest-command'] }}" - JEST_EXTRA_ARGS="${{ inputs['jest-extra-args'] }}" - TEST_COMMAND="$JEST_COMMAND $JEST_EXTRA_ARGS --json --outputFile=target_results.json --testLocationInResults" - echo "Executing: $TEST_COMMAND" - - set +e - eval "$TEST_COMMAND" > target_test_output.txt 2>&1 - EXIT_CODE=$? - set -e - - if [ -f target_results.json ]; then - echo "✅ Test execution completed (exit code: $EXIT_CODE)" + if [ -f results.json ]; then + echo "Test execution completed (exit code: $JEST_EXIT)" else - echo "❌ Test execution did not produce target_results.json (exit code: $EXIT_CODE)" + echo "No results.json - creating empty results file" + echo '{"numTotalTests": 0, "numPassedTests": 0, "testResults": []}' > results.json fi working-directory: ${{ inputs['working-directory'] }} - - name: Extract test results and create artifacts + - name: Extract test results id: extract-results - # Only run if there were no collection errors - if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - echo "Processing test results for target branch: ${{ inputs.target_branch_to_compare }}" - - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_extract_results.log) 2>&1 - node <<'NODE' const fs = require('fs'); const path = require('path'); - const resultsPath = path.resolve(process.cwd(), 'target_results.json'); - let targetTotal = 0; - let targetPassed = 0; - let targetPercentage = 0; + let total = 0; + let passed = 0; + let percentage = 0; const passingTests = []; const failingTests = []; + const errorTests = []; const skippedTests = []; const xfailedTests = []; - const skippedTestsWithReasons = {}; - const xfailedTestsWithReasons = {}; + const xpassedTests = []; const allTests = []; - let warningsList = []; + const skippedWithReasons = {}; + const xfailedWithReasons = {}; + const warningsList = []; const safeRead = (filePath) => { try { @@ -596,16 +342,16 @@ jobs: } }; - const rawResults = safeRead(resultsPath); + const rawResults = safeRead('results.json'); if (rawResults) { try { const data = JSON.parse(rawResults); if (typeof data.numTotalTests === 'number') { - targetTotal = data.numTotalTests; + total = data.numTotalTests; } if (typeof data.numPassedTests === 'number') { - targetPassed = data.numPassedTests; + passed = data.numPassedTests; } const suites = Array.isArray(data.testResults) ? data.testResults : []; @@ -632,7 +378,7 @@ jobs: titleParts.push(String(assertion.title)); } - const fullName = titleParts.join(' › ') || assertion.fullName || assertion.title || 'Unnamed test'; + const fullName = titleParts.join(' > ') || assertion.fullName || assertion.title || 'Unnamed test'; const identifier = relativeSuitePath ? `${relativeSuitePath}::${fullName}` : fullName; @@ -644,613 +390,104 @@ jobs: passingTests.push(identifier); break; case 'failed': - failingTests.push(identifier); + // Check if it's an error (test threw) vs assertion failure + if (assertion.failureMessages && assertion.failureMessages.some(m => + m.includes('Error:') && !m.includes('AssertionError') && !m.includes('expect('))) { + errorTests.push(identifier); + } else { + failingTests.push(identifier); + } break; case 'pending': case 'skipped': - case 'todo': skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = - assertion.status === 'todo' - ? 
'Marked as todo in Jest suite' - : 'Test marked as skipped/pending in Jest output'; + skippedWithReasons[identifier] = 'Test marked as skipped/pending in Jest output'; + break; + case 'todo': + // Jest's todo is similar to xfail + xfailedTests.push(identifier); + xfailedWithReasons[identifier] = 'Marked as todo in Jest suite'; break; default: skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = 'Test not executed (treated as skipped)'; + skippedWithReasons[identifier] = 'Test not executed (treated as skipped)'; break; } } } - if (!targetTotal && allTests.length) { - targetTotal = allTests.length; + if (!total && allTests.length) { + total = allTests.length; } - if (!targetPassed && passingTests.length) { - targetPassed = passingTests.length; + if (!passed && passingTests.length) { + passed = passingTests.length; } - targetPercentage = targetTotal > 0 ? (targetPassed / targetTotal) * 100 : 0; + percentage = total > 0 ? (passed / total) * 100 : 0; - console.log(`Parsed target Jest results: ${targetPassed}/${targetTotal} passed (${targetPercentage.toFixed(2)}%)`); - console.log(`Passing tests: ${passingTests.length}`); - console.log(`Total discovered tests: ${allTests.length}`); + console.log(`Parsed Jest results: ${passed}/${total} passed (${percentage.toFixed(2)}%)`); } catch (error) { - console.log(`Error parsing target_results.json: ${error.message}`); + console.log(`Error parsing results.json: ${error.message}`); } } else { - console.log('target_results.json not found. No baseline data to parse.'); + console.log('results.json not found. No test data to parse.'); } - const outputContent = safeRead(path.resolve(process.cwd(), 'target_test_output.txt')); + // Extract warnings from test output + const outputContent = safeRead('test_output.txt'); if (outputContent) { const warnLines = outputContent .split('\n') .map((line) => line.trimEnd()) - .filter((line) => /\bWARN(?:ING)?\b/.test(line)); + .filter((line) => /\bWARN(?:ING)?\b/i.test(line)); if (warnLines.length) { - warningsList = warnLines.slice(0, 200); - console.log(`Collected ${warningsList.length} warning line(s) from target Jest output.`); + warningsList.push(...warnLines.slice(0, 200)); + console.log(`Collected ${warningsList.length} warning line(s) from Jest output.`); } } + // Save artifact data const testData = { passing_tests: passingTests, failing_tests: failingTests, + error_tests: errorTests, skipped_tests: skippedTests, xfailed_tests: xfailedTests, + xpassed_tests: xpassedTests, all_tests: allTests, - skipped_tests_with_reasons: skippedTestsWithReasons, - xfailed_tests_with_reasons: xfailedTestsWithReasons, + skipped_tests_with_reasons: skippedWithReasons, + xfailed_tests_with_reasons: xfailedWithReasons, warnings: warningsList, }; - fs.writeFileSync('target_test_data.json', JSON.stringify(testData, null, 2)); - console.log('Wrote target_test_data.json with standardized Jest results.'); + fs.writeFileSync('test_data.json', JSON.stringify(testData, null, 2)); + + console.log(`Results: ${passed}/${total} (${percentage.toFixed(2)}%)`); - const percentageString = Number.isFinite(targetPercentage) ? 
targetPercentage.toFixed(2) : '0.00'; const outputLines = [ - `total=${targetTotal}`, - `passed=${targetPassed}`, - `percentage=${percentageString}`, - `passing_count=${passingTests.length}`, + `total=${total}`, + `passed=${passed}`, + `percentage=${percentage.toFixed(2)}`, + `failing_count=${failingTests.length}`, + `error_count=${errorTests.length}`, + `skipped_count=${skippedTests.length}`, + `xfailed_count=${xfailedTests.length}`, ]; if (process.env.GITHUB_OUTPUT) { fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`); } NODE - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Target branch test results processed: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} tests passed (${{ steps.extract-results.outputs.percentage }}%)" working-directory: ${{ inputs['working-directory'] }} - - name: Upload target branch artifacts + - name: Upload test artifacts if: always() uses: actions/upload-artifact@v4 with: - name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} + name: ${{ inputs.artifact_name }} path: | - ${{ inputs['working-directory'] }}/target_test_data.json - ${{ inputs['working-directory'] }}/target_test_output.txt - ${{ inputs['working-directory'] }}/target_results.json - ${{ inputs['working-directory'] }}/collection_output.txt - ${{ inputs['working-directory'] }}/debug_target_collection.log - ${{ inputs['working-directory'] }}/debug_target_extract_results.log + ${{ inputs['working-directory'] }}/test_data.json + ${{ inputs['working-directory'] }}/test_output.txt + ${{ inputs['working-directory'] }}/results.json retention-days: 3 if-no-files-found: ignore - - # Add a step to set default outputs when collection errors are detected - - name: Set collection error outputs - id: set-error-outputs - if: steps.check-collection.outputs.has_collection_errors == 'true' - run: | - echo "::warning::Setting default outputs for target branch due to collection errors" - echo "total=0" >> $GITHUB_OUTPUT - echo "passed=0" >> $GITHUB_OUTPUT - echo "percentage=0.00" >> $GITHUB_OUTPUT - echo "passing_count=0" >> $GITHUB_OUTPUT - - compare-results: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/regression-test.yml - with: - runs_on: ${{ inputs.runs_on }} - baseline_label: ${{ inputs.target_branch_to_compare }} - baseline_results_artifact: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - baseline_results_filename: target_test_data.json - current_label: ${{ github.head_ref || github.ref_name || 'source branch' }} - current_results_artifact: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - current_results_filename: pr_test_data.json - baseline_passed: ${{ needs.test-target-branch.outputs.passed }} - baseline_total: ${{ needs.test-target-branch.outputs.total }} - baseline_percentage: ${{ needs.test-target-branch.outputs.percentage }} - current_passed: ${{ needs.test-source-branch.outputs.passed }} - current_total: ${{ needs.test-source-branch.outputs.total }} - current_percentage: ${{ needs.test-source-branch.outputs.percentage }} - baseline_collection_errors: ${{ needs.test-target-branch.outputs.collection_errors }} - baseline_no_tests_found: ${{ needs.test-target-branch.outputs.no_tests_found }} - current_collection_errors: ${{ needs.test-source-branch.outputs.collection_errors }} - current_no_tests_found: ${{ needs.test-source-branch.outputs.no_tests_found }} - artifact_name: regression_details_pr_${{ 
github.event.pull_request.number || github.run_id }}_tests - - perform-regression-analysis: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/meta-regression-analysis.yml - with: - item_type_singular: "test" - item_type_plural: "tests" - pr_number: ${{ github.event.pull_request.number }} - run_id: ${{ github.run_id }} - target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - - # Conditionally run notification job only if needed - prepare-notification: - name: Prepare Notification Data - needs: - [ - test-source-branch, - test-target-branch, - compare-results, - perform-regression-analysis, - ] - # Notify on collection errors, no tests found, compare result failure, or if regressions are detected - if: | - always() && - ( - needs.test-source-branch.outputs.collection_errors == 'true' || - needs.test-source-branch.outputs.no_tests_found == 'true' || - needs.compare-results.result == 'failure' || - needs.perform-regression-analysis.outputs.has_regressions == 'true' - ) - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - message_body: ${{ steps.construct_notification.outputs.message_body_out }} - ping_user_ids: ${{ steps.construct_notification.outputs.ping_user_ids_out }} - artifact_path: ${{ steps.construct_notification.outputs.artifact_path_out }} - should_notify: "true" - webhook_available_for_alert: ${{ steps.check_webhook_availability.outputs.webhook_available }} - - steps: - - name: Check for Discord Webhook URL - id: check_webhook_availability - run: | - if [ -z "${{ secrets.DISCORD_WEBHOOK_URL }}" ]; then - echo "::notice::DISCORD_WEBHOOK_URL secret is not set. Discord notifications will likely be skipped by the alert workflow if it relies on this secret." - echo "webhook_available=false" >> $GITHUB_OUTPUT - else - echo "webhook_available=true" >> $GITHUB_OUTPUT - fi - - name: Download regression details (if any) - id: download_regressions - if: always() - uses: actions/download-artifact@v4 - with: - name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - path: . # Download to current directory - continue-on-error: true - - - name: Check downloaded regression file - if: always() - run: | - echo "Checking for regression details file..." 
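For reference, the extract-results step earlier in this file buckets each Jest assertion into passed, failed, error, skipped, or xfailed, treating todo like an expected failure and splitting thrown errors from assertion failures by inspecting failureMessages. The self-contained sketch below applies that same mapping to an invented sample object instead of results.json.

// classify-jest.js - sketch of the status mapping used by the extract step.
// The sample object is made up; real input comes from Jest's --json output.
const sample = {
  testResults: [
    {
      name: '/repo/src/__tests__/math.test.js',
      assertionResults: [
        { ancestorTitles: ['add'], title: 'adds', status: 'passed', failureMessages: [] },
        { ancestorTitles: ['add'], title: 'overflows', status: 'failed',
          failureMessages: ['Error: boom'] },              // thrown -> error bucket
        { ancestorTitles: ['sub'], title: 'borrows', status: 'failed',
          failureMessages: ['expect(received).toBe(1)'] }, // assertion -> failed bucket
        { ancestorTitles: [], title: 'later', status: 'todo', failureMessages: [] },
        { ancestorTitles: [], title: 'flaky', status: 'pending', failureMessages: [] },
      ],
    },
  ],
};

const buckets = { passed: [], failed: [], error: [], skipped: [], xfailed: [] };

for (const suite of sample.testResults) {
  for (const a of suite.assertionResults) {
    const id = `${suite.name}::${[...a.ancestorTitles, a.title].join(' > ')}`;
    if (a.status === 'passed') buckets.passed.push(id);
    else if (a.status === 'todo') buckets.xfailed.push(id); // todo treated like xfail
    else if (a.status === 'failed') {
      const threw = (a.failureMessages || []).some(
        (m) => m.includes('Error:') && !m.includes('AssertionError') && !m.includes('expect(')
      );
      (threw ? buckets.error : buckets.failed).push(id);
    } else buckets.skipped.push(id); // pending/skipped/unknown
  }
}

console.log(Object.fromEntries(Object.entries(buckets).map(([k, v]) => [k, v.length])));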
- if [ -f "regression_details.txt" ]; then - echo "✅ Regression details file found" - echo "File size: $(wc -c < regression_details.txt) bytes" - echo "First few lines:" - head -5 regression_details.txt - else - echo "❌ Regression details file not found" - fi - - if [ -f "comprehensive_regression_report.txt" ]; then - echo "✅ Comprehensive regression report found" - echo "File size: $(wc -c < comprehensive_regression_report.txt) bytes" - else - echo "❌ Comprehensive regression report not found" - fi - - - name: Construct Discord Notification - id: construct_notification - env: - LINT_RESULT: ${{ needs.lint.result }} - SOURCE_TEST_RESULT: ${{ needs.test-source-branch.result }} - TARGET_TEST_RESULT: ${{ needs.test-target-branch.result }} - COMPARE_RESULT: ${{ needs.compare-results.result }} - PR_COLLECTION_ERRORS: ${{ needs.test-source-branch.outputs.collection_errors }} - PR_NO_TESTS_FOUND: ${{ needs.test-source-branch.outputs.no_tests_found }} - PR_ERROR_TYPE: ${{ needs.test-source-branch.outputs.error_type }} - PR_ERROR_DETAILS_TRUNCATED: ${{ needs.test-source-branch.outputs.error_details }} - HAS_REGRESSIONS: ${{ needs.perform-regression-analysis.outputs.has_regressions }} - REGRESSION_COUNT: ${{ needs.perform-regression-analysis.outputs.regression_count }} - PR_TOTAL_TESTS: ${{ needs.test-source-branch.outputs.total }} - PR_PASSED_TESTS: ${{ needs.test-source-branch.outputs.passed }} - PR_PERCENTAGE: ${{ needs.test-source-branch.outputs.percentage }} - TARGET_TOTAL_TESTS: ${{ needs.test-target-branch.outputs.total }} - TARGET_PASSED_TESTS: ${{ needs.test-target-branch.outputs.passed }} - TARGET_PERCENTAGE: ${{ needs.test-target-branch.outputs.percentage }} - PR_NUMBER: ${{ github.event.pull_request.number }} - PR_TITLE: ${{ github.event.pull_request.title }} - PR_URL: ${{ github.event.pull_request.html_url }} - TARGET_BRANCH_NAME: ${{ inputs.target_branch_to_compare }} - PR_BRANCH_NAME: ${{ github.head_ref }} - REPO_URL: ${{ github.server_url }}/${{ github.repository }} - ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - GH_ASSIGNEES_JSON: ${{ toJson(github.event.pull_request.assignees) }} - USER_MAP_JSON: ${{ secrets.DISCORD_USER_MAP || '{}' }} - REGRESSION_FILE_PATH: "regression_details.txt" - DOWNLOAD_REGRESSIONS_OUTCOME: ${{ steps.download_regressions.outcome }} - INPUT_PING_LATEST_COMMITTER: ${{ inputs.ping_latest_committer }} - run: | - # Create debug file for detailed notification construction - exec 3>&1 4>&2 - exec 1> >(tee -a debug_notification_construction.log) 2>&1 - - MESSAGE_LINES=() # Use an array to build message lines - PING_KEYS_OUTPUT="" # Will be comma-separated GitHub logins - ARTIFACT_PATH_OUTPUT="" - - echo "Raw GH_ASSIGNEES_JSON value: [$GH_ASSIGNEES_JSON]" - echo "Raw USER_MAP_JSON value: [$USER_MAP_JSON]" - - # 1. 
Determine Pings - Collect GitHub Logins to pass to alert-discord.yml - # Initialize PING_KEYS_OUTPUT - PING_KEYS_OUTPUT="" - - # Add assignees to PING_KEYS_OUTPUT - if [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && command -v jq &> /dev/null; then - ASSIGNEE_LOGINS_ARRAY=($(echo "$GH_ASSIGNEES_JSON" | jq -r '.[].login // empty')) - echo "GH_ASSIGNEES_JSON received: $GH_ASSIGNEES_JSON" - echo "Extracted ASSIGNEE_LOGINS_ARRAY: (${ASSIGNEE_LOGINS_ARRAY[*]})" - echo "Count of assignees extracted: ${#ASSIGNEE_LOGINS_ARRAY[@]}" - - MAPPED_ASSIGNEE_COUNT=0 - TEMP_PING_KEYS=() - - for assignee_login in "${ASSIGNEE_LOGINS_ARRAY[@]}"; do - if [ -z "$assignee_login" ]; then - echo "Skipping empty assignee login." - continue - fi - echo "Processing assignee for ping: '$assignee_login'" - # Check if this assignee_login exists as a key in USER_MAP_JSON - if echo "$USER_MAP_JSON" | jq -e --arg K "$assignee_login" '.[$K]' > /dev/null; then - echo "Assignee '$assignee_login' FOUND in USER_MAP_JSON." - TEMP_PING_KEYS+=("$assignee_login") - MAPPED_ASSIGNEE_COUNT=$((MAPPED_ASSIGNEE_COUNT + 1)) - else - echo "Assignee '$assignee_login' NOT FOUND in USER_MAP_JSON." - fi - done - - echo "Total assignees found in USER_MAP_JSON and added to pings: $MAPPED_ASSIGNEE_COUNT" - - if [ ${#TEMP_PING_KEYS[@]} -gt 0 ]; then - PING_KEYS_OUTPUT=$(IFS=,; echo "${TEMP_PING_KEYS[*]}") - echo "Initial PING_KEYS_OUTPUT from assignees: [$PING_KEYS_OUTPUT]" - else - echo "No assignees found or GH_ASSIGNEES_JSON was empty, or no assignees were found in USER_MAP_JSON." - fi - elif [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && ! command -v jq &> /dev/null; then - echo "::warning::jq is not available. Cannot determine GitHub users (assignees) for pings." - else - echo "No user map JSON or jq not found. PING_KEYS_OUTPUT (from assignees) will be empty." - fi - - # Add latest committer if INPUT_PING_LATEST_COMMITTER is true - if [[ "$INPUT_PING_LATEST_COMMITTER" == "true" ]]; then - echo "INPUT_PING_LATEST_COMMITTER is true. Attempting to fetch latest committer for PR #${PR_NUMBER}." - if command -v gh &> /dev/null && [ -n "$PR_NUMBER" ]; then - LATEST_COMMITTER_LOGIN_RAW=$(gh pr view "$PR_NUMBER" --json commits --jq '.commits[-1].author.login' 2>/dev/null || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN_RAW" ] && [ "$LATEST_COMMITTER_LOGIN_RAW" != "null" ]; then - # Apply bot filter (e.g., names ending in [bot] or -bot) - LATEST_COMMITTER_LOGIN=$(echo "$LATEST_COMMITTER_LOGIN_RAW" | grep -v -E -i '(\[bot\]$|-bot$)' || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN" ]; then - echo "Latest committer identified: $LATEST_COMMITTER_LOGIN" - - # Check if this committer is already in PING_KEYS_OUTPUT - ALREADY_IN_LIST=0 - if [ -n "$PING_KEYS_OUTPUT" ]; then # Only check if PING_KEYS_OUTPUT is not empty - IFS=',' read -ra PING_ARRAY <<< "$PING_KEYS_OUTPUT" - for key in "${PING_ARRAY[@]}"; do - if [[ "$key" == "$LATEST_COMMITTER_LOGIN" ]]; then - ALREADY_IN_LIST=1 - break - fi - done - fi - - if [[ "$ALREADY_IN_LIST" -eq 0 ]]; then - if [ -z "$PING_KEYS_OUTPUT" ]; then - PING_KEYS_OUTPUT="$LATEST_COMMITTER_LOGIN" - else - PING_KEYS_OUTPUT="$PING_KEYS_OUTPUT,$LATEST_COMMITTER_LOGIN" - fi - echo "Added latest committer '$LATEST_COMMITTER_LOGIN' to PING_KEYS_OUTPUT. New list: [$PING_KEYS_OUTPUT]" - else - echo "Latest committer '$LATEST_COMMITTER_LOGIN' is already in PING_KEYS_OUTPUT (likely an assignee)." 
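The ping logic above keeps only those assignee logins that exist as keys in the DISCORD_USER_MAP secret, and separately filters bot-style logins when adding the latest committer. A compact Node sketch of that filtering follows; both JSON samples are invented, and applying the bot filter to assignees (rather than only to the committer) is a simplification of the sketch.

// ping-keys.js - sketch of the assignee/user-map filtering done above with jq.
// Invented samples; in the workflow these come from github.event.pull_request.assignees
// and the DISCORD_USER_MAP secret.
const assignees = [{ login: 'alice' }, { login: 'renovate[bot]' }, { login: 'carol' }];
const userMap = { alice: '111111111111111111', bob: '222222222222222222' };

const isBot = (login) => /(\[bot\]$|-bot$)/i.test(login);

const pingKeys = assignees
  .map((a) => a && a.login)
  .filter(Boolean)
  .filter((login) => !isBot(login))
  .filter((login) => Object.prototype.hasOwnProperty.call(userMap, login));

console.log(pingKeys.join(',')); // -> "alice", the comma-separated PING_KEYS_OUTPUT shape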
- fi - else - echo "Latest committer login '$LATEST_COMMITTER_LOGIN_RAW' was filtered out (likely a bot or pattern match) or empty after filter." - fi - else - echo "No latest committer login found for PR #$PR_NUMBER from gh command, or login was null." - fi - else - if ! command -v gh &> /dev/null; then - echo "::warning::gh command not available. Cannot fetch latest committer." - fi - if [ -z "$PR_NUMBER" ]; then - echo "::warning::PR_NUMBER is not set (event might not be a pull_request). Cannot fetch latest committer." - fi - fi - fi - - # Restore stdout/stderr for GitHub Actions to show final summary - exec 1>&3 2>&4 - - # Make this a standard echo for better visibility of the final list - echo "Final Ping Keys Output (GitHub Logins from test-js-jest.yml): [$PING_KEYS_OUTPUT]" - echo "ping_user_ids_out=$PING_KEYS_OUTPUT" >> $GITHUB_OUTPUT - - # Store branch names in variables with proper quoting - PR_BRANCH="${PR_BRANCH_NAME:-unknown}" - TARGET_BRANCH="${TARGET_BRANCH_NAME:-unknown}" - - # 2. Construct Message Body - MESSAGE_LINES+=("**Jest Comparison & Regression Analysis for PR [#${PR_NUMBER}: ${PR_TITLE}](${PR_URL})**") - MESSAGE_LINES+=("Branch: [\`${PR_BRANCH}\`](${REPO_URL}/tree/${PR_BRANCH}) against [\`${TARGET_BRANCH}\`](${REPO_URL}/tree/${TARGET_BRANCH})") - MESSAGE_LINES+=("---") - - # Job Status Summary - MESSAGE_LINES+=("**Job Status:**") - LINT_STATUS="Success" - if [[ "$LINT_RESULT" == "failure" ]]; then LINT_STATUS="Failed"; elif [[ "$LINT_RESULT" == "skipped" ]]; then LINT_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Linting: $LINT_STATUS") - - SOURCE_TEST_STATUS="Success" - if [[ "$SOURCE_TEST_RESULT" == "failure" ]]; then SOURCE_TEST_STATUS="Failed"; elif [[ "$SOURCE_TEST_RESULT" == "skipped" ]]; then SOURCE_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- PR Branch Tests (\`${PR_BRANCH}\`): $SOURCE_TEST_STATUS") - - TARGET_TEST_STATUS="Success" - if [[ "$TARGET_TEST_RESULT" == "failure" ]]; then TARGET_TEST_STATUS="Failed"; elif [[ "$TARGET_TEST_RESULT" == "skipped" ]]; then TARGET_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Target Branch Tests (\`${TARGET_BRANCH}\`): $TARGET_TEST_STATUS") - - COMPARE_STATUS="Success" - if [[ "$COMPARE_RESULT" == "failure" ]]; then COMPARE_STATUS="Failed"; elif [[ "$COMPARE_RESULT" == "skipped" ]]; then COMPARE_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Comparison & Regression: $COMPARE_STATUS") - MESSAGE_LINES+=("---") - - # Test Discovery Issues in PR Branch - if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: ERROR: Test Discovery Failed in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Type: \`${PR_ERROR_TYPE}\`") - MESSAGE_LINES+=(" - This usually indicates import errors or syntax issues preventing tests from being collected.") - MESSAGE_LINES+=(" - See attached file for detailed error information.") - elif [[ "$PR_NO_TESTS_FOUND" == "true" ]]; then - MESSAGE_LINES+=("**:warning: WARNING: No Tests Found in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Jest did not discover any test files matching its patterns.") - MESSAGE_LINES+=(" - Ensure your test files are correctly named (e.g., \`test_*.py\` or \`*_test.py\`) and located.") - fi - - # Regression Analysis Summary - if [[ "$HAS_REGRESSIONS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: REGRESSIONS DETECTED**") - - # Check if we have comprehensive regression file with categories - if [ -f "comprehensive_regression_report.txt" ]; then - # Extract counts from comprehensive report - PASS_FAIL_COUNT=$(grep -o "PASS-TO-FAIL 
REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - PASS_SKIP_COUNT=$(grep -o "PASS-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - PASS_GONE_COUNT=$(grep -o "PASS-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_GONE_COUNT=$(grep -o "FAIL-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - DISCOVERY_COUNT=$(grep -o "DISCOVERY REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - - # Add category summaries (≤5 show paths, >5 show count + refer to file) - if [[ "$PASS_FAIL_COUNT" -gt 0 ]]; then - if [[ "$PASS_FAIL_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Fail ($PASS_FAIL_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-FAIL REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_FAIL_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Fail:** $PASS_FAIL_COUNT tests (see attached file)") - fi - fi - - if [[ "$PASS_SKIP_COUNT" -gt 0 ]]; then - if [[ "$PASS_SKIP_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Skip ($PASS_SKIP_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-SKIP REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_SKIP_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Skip:** $PASS_SKIP_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$PASS_GONE_COUNT" -gt 0 ]]; then - if [[ "$PASS_GONE_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Gone ($PASS_GONE_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_GONE_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Gone:** $PASS_GONE_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_GONE_COUNT" -gt 0 ]]; then - if [[ "$FAIL_GONE_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Fail→Gone ($FAIL_GONE_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_GONE_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Fail→Gone:** $FAIL_GONE_COUNT tests (see attached file)") - fi - fi - - if [[ "$DISCOVERY_COUNT" -gt 0 ]]; then - if [[ "$DISCOVERY_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Discovery Warnings ($DISCOVERY_COUNT):**") - MESSAGE_LINES+=("• $DISCOVERY_COUNT new warnings (see attached file)") - else - MESSAGE_LINES+=("**Discovery Warnings:** $DISCOVERY_COUNT warnings (see attached file)") - fi - fi - - if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then - if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)") - fi - fi - - else - # Fallback to simple regression count - MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.") - fi - elif [[ "$COMPARE_RESULT" == "failure" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - # This case handles general comparison failures NOT due to specific regressions - MESSAGE_LINES+=("**:warning: TEST RESULTS DECLINED**") - MESSAGE_LINES+=(" - The PR branch shows a decrease in test success compared to the target branch, but no specific regressions were identified by the \`meta-regression-analysis\` job.") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - elif [[ "$COMPARE_RESULT" == "success" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - MESSAGE_LINES+=("**:white_check_mark: NO REGRESSIONS DETECTED**") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - fi - - if [[ "$HAS_REGRESSIONS" != "true" ]] && [ -f "comprehensive_regression_report.txt" ]; then - FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 || "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 || "$NEW_TESTS_COUNT" -gt 0 ]]; then - MESSAGE_LINES+=("**:sparkles: Improvements & Additions**") - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then - if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)") - fi - fi - fi - fi - - MESSAGE_LINES+=("---") - MESSAGE_LINES+=("[View Workflow Run](${ACTION_RUN_URL})") - - # Set artifact path - always prefer comprehensive report if it exists - if [ -f "comprehensive_regression_report.txt" ]; then - ARTIFACT_PATH_OUTPUT="comprehensive_regression_report.txt" - elif [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then - ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH" - else - ARTIFACT_PATH_OUTPUT="" - fi - - # Construct with actual newlines - FINAL_MESSAGE_BODY=$(printf "%s\\n" "${MESSAGE_LINES[@]}") - if [ ${#MESSAGE_LINES[@]} -gt 0 ]; then - # Remove the very last actual newline - FINAL_MESSAGE_BODY="${FINAL_MESSAGE_BODY%\\n}" - fi - - echo "Final message body prepared in test-js-jest.yml" - - echo "message_body_out<> $GITHUB_OUTPUT - echo "$FINAL_MESSAGE_BODY" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - echo "artifact_path_out=$ARTIFACT_PATH_OUTPUT" >> $GITHUB_OUTPUT - - notify-discord: - name: Send Discord Notification - needs: [prepare-notification] - if: | - always() && - needs.prepare-notification.outputs.should_notify == 'true' && - needs.prepare-notification.outputs.webhook_available_for_alert == 'true' - uses: ./.github/workflows/alert-discord.yml - with: - message_body: ${{ needs.prepare-notification.outputs.message_body }} - ping_user_ids: ${{ needs.prepare-notification.outputs.ping_user_ids }} - artifact_paths: ${{ needs.prepare-notification.outputs.artifact_path }} - should_notify: ${{ needs.prepare-notification.outputs.should_notify }} - runs_on: ${{ inputs.runs_on }} - secrets: - DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} - DISCORD_USER_MAP: ${{ secrets.DISCORD_USER_MAP }} diff --git a/.github/workflows/test-js-mocha.yml b/.github/workflows/test-js-mocha.yml index b47c52f..e1ce59a 100644 --- a/.github/workflows/test-js-mocha.yml +++ b/.github/workflows/test-js-mocha.yml @@ -1,17 +1,32 @@ -name: Reusable Compare Mocha Results +name: Reusable Mocha Runner on: workflow_call: inputs: - target_branch_to_compare: - description: "The target branch to compare against (e.g., main, refs/heads/main)." - required: true + ref: + description: "Git ref to checkout and test. Leave empty for default checkout." + required: false type: string + default: "" node-version: description: "Node.js version to use for testing." required: false type: string default: "18" + runs_on: + description: "Runner label for the test job." + required: false + type: string + default: '["self-hosted", "multithreaded"]' + artifact_name: + description: "Name for the test results artifact." + required: true + type: string + parallel_workers: + description: "Number of parallel workers for Mocha. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). 
Use 'auto' for cgroup-aware CPU count, or a number." + required: false + type: string + default: "" install-command: description: "Optional command to install dependencies (defaults to npm/pnpm/yarn auto-detection)." required: false @@ -26,62 +41,49 @@ on: description: "Additional arguments to pass to the Mocha command (applied before workflow-managed flags)." required: false type: string - default: "--reporter json" + default: "" working-directory: description: "Directory where install and test commands should be executed." required: false type: string default: "." - ping_latest_committer: - description: "If true, the latest committer on the PR will be added to the ping list." - required: false - type: boolean - default: false - runs_on: - required: false - type: string - default: '["self-hosted", "multithreaded"]' - secrets: - DISCORD_WEBHOOK_URL: - description: "Discord Webhook URL for failure notifications. If not provided, notifications are skipped." - required: false - DISCORD_USER_MAP: - description: 'JSON string mapping GitHub usernames to Discord User IDs (e.g., {"user1":"id1"}). If not provided, users won''t be pinged.' - required: false outputs: - pr_total: - description: "Total tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.total }} - pr_passed: - description: "Passed tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.passed }} - pr_percentage: - description: "Pass percentage in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.percentage }} - pr_collection_errors: - description: "PR branch has collection errors" - value: ${{ jobs.test-source-branch.outputs.collection_errors }} - pr_no_tests_found: - description: "PR branch has no tests found" - value: ${{ jobs.test-source-branch.outputs.no_tests_found }} - target_total: - description: "Total tests in target branch" - value: ${{ jobs.test-target-branch.outputs.total }} - target_passed: - description: "Passed tests in target branch" - value: ${{ jobs.test-target-branch.outputs.passed }} - target_percentage: - description: "Pass percentage in target branch" - value: ${{ jobs.test-target-branch.outputs.percentage }} - has_regressions: - description: "Boolean indicating if regressions were found" - value: ${{ jobs.compare-results.outputs.has_regressions }} - regression_count: - description: "Number of test regressions found" - value: ${{ jobs.compare-results.outputs.regression_count }} + total: + description: "Total number of tests" + value: ${{ jobs.test.outputs.total }} + passed: + description: "Number of passing tests" + value: ${{ jobs.test.outputs.passed }} + percentage: + description: "Pass percentage" + value: ${{ jobs.test.outputs.percentage }} + collection_errors: + description: "Whether collection errors occurred" + value: ${{ jobs.test.outputs.collection_errors }} + no_tests_found: + description: "Whether no tests were found" + value: ${{ jobs.test.outputs.no_tests_found }} + has_errors: + description: "Whether any errors occurred" + value: ${{ jobs.test.outputs.has_errors }} + error_type: + description: "Type of error if any" + value: ${{ jobs.test.outputs.error_type }} + failing_count: + description: "Number of failing tests" + value: ${{ jobs.test.outputs.failing_count }} + error_count: + description: "Number of errored tests" + value: ${{ jobs.test.outputs.error_count }} + skipped_count: + description: "Number of skipped tests" + value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of xfailed tests (pending in Mocha)" + value: 
${{ jobs.test.outputs.xfailed_count }} jobs: - test-source-branch: + test: runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: total: ${{ steps.extract-results.outputs.total }} @@ -91,16 +93,17 @@ jobs: no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} has_errors: ${{ steps.check-collection.outputs.has_errors }} error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} failing_count: ${{ steps.extract-results.outputs.failing_count }} + error_count: ${{ steps.extract-results.outputs.error_count }} skipped_count: ${{ steps.extract-results.outputs.skipped_count }} xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} steps: - - name: Checkout PR Branch + - name: Checkout uses: actions/checkout@v4.2.2 with: submodules: "recursive" + ref: ${{ inputs.ref || github.ref }} - name: Set up Node.js uses: actions/setup-node@v4 @@ -150,17 +153,10 @@ jobs: HAS_COLLECTION_ERRORS="false" NO_TESTS_FOUND="false" ERROR_TYPE="none" - ERROR_DETAILS="none" MOCHA_COMMAND="${{ inputs['mocha-command'] }}" MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}" - - # Ensure the Mocha command includes a JSON reporter so that discovery output is machine readable - if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then - MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json" - fi - - COLLECTION_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS --dry-run" + COLLECTION_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS --reporter json --dry-run" echo "Executing: $COLLECTION_COMMAND" @@ -182,484 +178,170 @@ jobs: else ERROR_TYPE="ExecutionError" fi - - ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g') - echo "::error::Test discovery errors detected in PR branch via Mocha ($ERROR_TYPE)" - echo "::error::First details: ${ERROR_DETAILS:0:200}..." 
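The collection check above triages a failed Mocha discovery run by grepping the captured output for known error strings and falling back to ExecutionError. The sketch below expresses the same first-match classification in Node against an invented sample line.

// classify-error.js - sketch of the grep-based error triage used by the collection check:
// the first matching pattern decides the reported error type.
const patterns = [
  ['ModuleNotFoundError', /cannot find module/i],
  ['SyntaxError', /syntaxerror/i],
  ['TypeError', /typeerror/i],
  ['ReferenceError', /referenceerror/i],
];

function classify(output) {
  for (const [type, re] of patterns) {
    if (re.test(output)) return type;
  }
  return 'ExecutionError';
}

// Invented sample; in the workflow the input is collection_output.txt.
console.log(classify("Error: Cannot find module './config'")); // ModuleNotFoundError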
+ echo "::error::Test discovery errors detected via Mocha ($ERROR_TYPE)" else - NODE <<'NODE' - const fs = require('fs'); - const path = require('path'); - - const outputPath = path.resolve(process.cwd(), 'collection_output.json'); - let discoveredCount = 0; - - try { - const raw = fs.readFileSync(outputPath, 'utf-8'); - if (raw.trim()) { - const data = JSON.parse(raw); - if (data && data.stats && typeof data.stats.tests === 'number') { - discoveredCount = data.stats.tests; - } else if (Array.isArray(data.tests)) { - discoveredCount = data.tests.length; + # Parse JSON output to count tests + TEST_COUNT=0 + if [ -s collection_output.json ]; then + TEST_COUNT=$(node -e " + try { + const data = JSON.parse(require('fs').readFileSync('collection_output.json', 'utf-8')); + console.log(data.stats?.tests || (data.tests?.length || 0)); + } catch (e) { + console.log(0); } - } - } catch (error) { - console.log(`::warning::Unable to parse Mocha discovery JSON: ${error.message}`); - } - - fs.writeFileSync( - process.env.GITHUB_OUTPUT, - `tests_discovered=${discoveredCount}\n`, - { flag: 'a' } - ); - NODE - - TEST_COUNT=$(grep -o "tests_discovered=[0-9]*" $GITHUB_OUTPUT | tail -1 | cut -d'=' -f2) - if [ -z "$TEST_COUNT" ]; then - TEST_COUNT=0 + " 2>/dev/null || echo "0") fi if [ "$TEST_COUNT" = "0" ]; then NO_TESTS_FOUND="true" ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="Mocha --dry-run did not discover any tests" - echo "::warning::No tests were found in the PR branch" + echo "::warning::No tests were found" else - echo "Found $TEST_COUNT test(s) in PR branch" + echo "Found $TEST_COUNT test(s)" fi fi - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - + echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> "$GITHUB_OUTPUT" + echo "no_tests_found=$NO_TESTS_FOUND" >> "$GITHUB_OUTPUT" + echo "error_type=$ERROR_TYPE" >> "$GITHUB_OUTPUT" if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT + echo "has_errors=true" >> "$GITHUB_OUTPUT" else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - if [[ "$HAS_COLLECTION_ERRORS" == "true" ]]; then - echo "❌ Discovery Error: $ERROR_TYPE" - elif [[ "$NO_TESTS_FOUND" == "true" ]]; then - echo "⚠️ No Tests Found" - else - echo "✅ Discovery Success" + echo "has_errors=false" >> "$GITHUB_OUTPUT" fi working-directory: ${{ inputs['working-directory'] }} - - name: Run tests on PR Branch + - name: Run tests + id: run-tests + continue-on-error: true if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - echo "Running Mocha tests on PR branch..." - MOCHA_COMMAND="${{ inputs['mocha-command'] }}" - MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}" - - if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then - MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json" - fi - - TEST_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS" - echo "Executing: $TEST_COMMAND" - - set +e - eval "$TEST_COMMAND" > >(tee pr_results.json) 2> test_output.txt - EXIT_CODE=$? 
- set -e - - if [ -s pr_results.json ]; then - echo "✅ Test execution completed (exit code: $EXIT_CODE)" - else - echo "❌ Test execution did not produce pr_results.json (exit code: $EXIT_CODE)" - fi - working-directory: ${{ inputs['working-directory'] }} - - - name: Extract test results and create artifacts - id: extract-results - run: | - echo "PR_BRANCH=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_ENV - - node <<'NODE' - const fs = require('fs'); - const path = require('path'); - - const resultsPath = path.resolve(process.cwd(), 'pr_results.json'); - let prTotal = 0; - let prPassed = 0; - let prPercentage = 0; - const passingTests = []; - const failingTests = []; - const skippedTests = []; - const xfailedTests = []; - const skippedTestsWithReasons = {}; - const xfailedTestsWithReasons = {}; - const allTests = []; - const warningsList = []; - - const safeRead = (filePath) => { - try { - return fs.readFileSync(filePath, 'utf-8'); - } catch (error) { - return null; - } - }; - - const rawResults = safeRead(resultsPath); - - if (rawResults) { - try { - const data = JSON.parse(rawResults); - const stats = data?.stats ?? {}; - const tests = Array.isArray(data?.tests) ? data.tests : []; - - prTotal = Number.isFinite(stats.tests) ? Number(stats.tests) : tests.length; - prPassed = Number.isFinite(stats.passes) - ? Number(stats.passes) - : Array.isArray(data?.passes) - ? data.passes.length - : 0; - - for (const test of tests) { - const suitePath = test.file ? path.relative(process.cwd(), test.file) || test.file : ''; - const fullTitle = test.fullTitle || test.title || 'Unnamed test'; - const identifier = suitePath ? `${suitePath}::${fullTitle}` : fullTitle; - const state = test.state || (test.pending ? 'pending' : undefined); - - allTests.push(identifier); + set -euo pipefail + + cgroup_auto_workers() { + local n="" + + # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " + if [ -f /sys/fs/cgroup/cpu.max ]; then + local quota period + quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" + period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" + if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi - switch (state) { - case 'passed': - passingTests.push(identifier); - break; - case 'failed': - failingTests.push(identifier); - break; - case 'pending': - skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = - (test.err && test.err.message) || 'Marked as pending in Mocha output'; - break; - default: { - const matchBy = (collection) => - Array.isArray(collection) - ? 
collection.find( - (item) => - item && - item.fullTitle === test.fullTitle && - (item.file === test.file || (!item.file && !test.file)), - ) - : undefined; + # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us + if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + local quota period + quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" + period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" + if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi - if (!state) { - if (matchBy(data?.failures)) { - failingTests.push(identifier); - break; - } - if (matchBy(data?.passes)) { - passingTests.push(identifier); - break; - } - const pendingMatch = matchBy(data?.pending); - if (pendingMatch) { - skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = - (pendingMatch.err && pendingMatch.err.message) || 'Marked as pending in Mocha output'; - break; - } - } + # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) + if [ -z "$n" ]; then + local f="" + if [ -f /sys/fs/cgroup/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset.cpus" + elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset/cpuset.cpus" + fi - skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = 'Test state unknown; treated as skipped'; - break; - } - } - } + if [ -n "$f" ]; then + local spec + spec="$(cat "$f" | tr -d '[:space:]')" + if [ -n "$spec" ]; then + local count=0 + IFS=',' read -r -a parts <<< "$spec" + for p in "${parts[@]}"; do + if [[ "$p" == *-* ]]; then + local a="${p%%-*}" + local b="${p##*-}" + if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then + count=$((count + b - a + 1)) + fi + elif [[ "$p" =~ ^[0-9]+$ ]]; then + count=$((count + 1)) + fi + done + if [ "$count" -gt 0 ]; then + n="$count" + fi + fi + fi + fi - if (!prPassed && passingTests.length) { - prPassed = passingTests.length; - } + if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then + n="1" + fi - if (prTotal > 0) { - prPercentage = (prPassed / prTotal) * 100; - } - } catch (error) { - console.error(`Failed to parse Mocha JSON results: ${error.message}`); - } + echo "$n" } - const summary = { - total: prTotal, - passed: prPassed, - percentage: Number.isFinite(prPercentage) ? prPercentage.toFixed(2) : '0.00', - }; - - const output = { - summary, - passing_tests: passingTests, - failing_tests: failingTests, - skipped_tests: skippedTests, - xfailed_tests: xfailedTests, - all_tests: allTests, - skipped_tests_with_reasons: skippedTestsWithReasons, - xfailed_tests_with_reasons: xfailedTestsWithReasons, - warnings: warningsList, - }; - - fs.writeFileSync('pr_test_data.json', JSON.stringify(output, null, 2)); - - const outputLines = [ - `total=${prTotal}`, - `passed=${prPassed}`, - `percentage=${Number.isFinite(prPercentage) ? 
prPercentage.toFixed(2) : '0.00'}`, - `failing_count=${failingTests.length}`, - `skipped_count=${skippedTests.length}`, - `xfailed_count=${xfailedTests.length}`, - ]; - - fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`); - NODE - - echo "✅ Test results: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} passed (${{ steps.extract-results.outputs.percentage }}%)" - working-directory: ${{ inputs['working-directory'] }} - - - name: Upload PR branch artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: | - ${{ inputs['working-directory'] }}/pr_test_data.json - ${{ inputs['working-directory'] }}/test_output.txt - ${{ inputs['working-directory'] }}/pr_results.json - ${{ inputs['working-directory'] }}/collection_output.json - ${{ inputs['working-directory'] }}/collection_output.txt - retention-days: 3 - if-no-files-found: ignore - - test-target-branch: - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - total: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.total || steps.extract-results.outputs.total }} - passed: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passed || steps.extract-results.outputs.passed }} - percentage: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.percentage || steps.extract-results.outputs.percentage }} - collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }} - no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} - has_errors: ${{ steps.check-collection.outputs.has_errors }} - error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} - passing_count: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_count || steps.extract-results.outputs.passing_count }} - - steps: - - name: Checkout target branch - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - ref: ${{ inputs.target_branch_to_compare }} - - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: "${{ inputs['node-version'] }}" - - - name: Install dependencies - run: | - set -e - if command -v corepack >/dev/null 2>&1; then - corepack enable >/dev/null 2>&1 || true - fi - - INSTALL_COMMAND="${{ inputs['install-command'] }}" - if [ -n "$INSTALL_COMMAND" ]; then - echo "Running custom install command: $INSTALL_COMMAND" - eval "$INSTALL_COMMAND" - elif [ -f package-lock.json ]; then - echo "Detected package-lock.json; running npm ci" - npm ci - elif [ -f yarn.lock ]; then - if command -v yarn >/dev/null 2>&1; then - echo "Detected yarn.lock; running yarn install --frozen-lockfile" - yarn install --frozen-lockfile - else - echo "::warning::yarn.lock detected but yarn is unavailable. Falling back to npm install." - npm install - fi - elif [ -f pnpm-lock.yaml ]; then - if command -v pnpm >/dev/null 2>&1; then - echo "Detected pnpm-lock.yaml; running pnpm install --frozen-lockfile" - pnpm install --frozen-lockfile + WORKERS="${{ inputs.parallel_workers }}" + if [ -z "$WORKERS" ]; then + if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then + WORKERS="6" else - echo "::warning::pnpm-lock.yaml detected but pnpm is unavailable. Falling back to npm install." 
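The install step picks a package-manager command from whichever lockfile is present: package-lock.json maps to npm ci, yarn.lock to yarn install --frozen-lockfile, pnpm-lock.yaml to pnpm install --frozen-lockfile, and anything else to plain npm install. A small sketch of that precedence follows; the pickInstallCommand name is illustrative only.

// pick-install.js - sketch of the lockfile auto-detection used by the install step.
const fs = require('fs');

function pickInstallCommand(dir = '.') {
  // Same precedence as the step: npm lockfile, then yarn, then pnpm, else npm install.
  if (fs.existsSync(`${dir}/package-lock.json`)) return 'npm ci';
  if (fs.existsSync(`${dir}/yarn.lock`)) return 'yarn install --frozen-lockfile';
  if (fs.existsSync(`${dir}/pnpm-lock.yaml`)) return 'pnpm install --frozen-lockfile';
  return 'npm install';
}

console.log(pickInstallCommand(process.argv[2] || '.'));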
- npm install + WORKERS="1" fi - else - echo "No lockfile detected; running npm install" - npm install + elif [ "$WORKERS" = "auto" ]; then + WORKERS="$(cgroup_auto_workers)" fi - working-directory: ${{ inputs['working-directory'] }} - - - name: Check for test collection errors - id: check-collection - run: | - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_collection.log) 2>&1 - echo "Running Mocha collection check on target branch..." + echo "Running tests with $WORKERS workers..." - HAS_COLLECTION_ERRORS="false" - NO_TESTS_FOUND="false" - ERROR_TYPE="none" - ERROR_DETAILS="none" + # Build parallel flag for Mocha + PARALLEL_FLAG="" + if [ "$WORKERS" != "1" ]; then + PARALLEL_FLAG="--parallel --jobs $WORKERS" + fi MOCHA_COMMAND="${{ inputs['mocha-command'] }}" MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}" - if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then - MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json" - fi - - COLLECTION_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS --dry-run" - - echo "Executing: $COLLECTION_COMMAND" - set +e - eval "$COLLECTION_COMMAND" > collection_output.json 2> collection_output.txt - EXIT_CODE=$? + eval "$MOCHA_COMMAND $MOCHA_EXTRA_ARGS $PARALLEL_FLAG --reporter json" > results.json 2> test_output.txt + MOCHA_EXIT=$? set -e - exec 1>&3 2>&4 - - if [ "$EXIT_CODE" -ne 0 ]; then - HAS_COLLECTION_ERRORS="true" - if grep -qi "Cannot find module" collection_output.txt; then - ERROR_TYPE="ModuleNotFoundError" - elif grep -qi "SyntaxError" collection_output.txt; then - ERROR_TYPE="SyntaxError" - elif grep -qi "TypeError" collection_output.txt; then - ERROR_TYPE="TypeError" - elif grep -qi "ReferenceError" collection_output.txt; then - ERROR_TYPE="ReferenceError" - else - ERROR_TYPE="ExecutionError" - fi - - ERROR_DETAILS=$(head -40 collection_output.txt | tr ' -' ' ' | sed 's/"/\"/g') - echo "::warning::Test discovery errors detected in target branch via Mocha ($ERROR_TYPE)" - echo "::warning::First details: ${ERROR_DETAILS:0:200}..." 
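
For reference, the `auto` worker setting added above resolves parallelism from cgroup limits (v2 `cpu.max`, then v1 cfs quota/period, then the cpuset list) rather than the host CPU count. A standalone Python sketch of the same resolution order, falling back to 1 when no limit is readable:

```python
#!/usr/bin/env python3
"""Sketch of the cgroup-aware worker auto-detection behind the `auto` setting.

Mirrors the shell helper in the workflow: cgroup v2 cpu.max, then v1
cfs_quota_us/cfs_period_us, then the cpuset list, falling back to 1.
"""
import math


def _read(path):
    try:
        with open(path) as fh:
            return fh.read().strip()
    except OSError:
        return None


def _cpuset_count(spec):
    # "0-3,8,10-11" -> 6
    count = 0
    for part in spec.split(","):
        part = part.strip()
        if not part:
            continue
        if "-" in part:
            lo, hi = part.split("-", 1)
            if lo.isdigit() and hi.isdigit() and int(hi) >= int(lo):
                count += int(hi) - int(lo) + 1
        elif part.isdigit():
            count += 1
    return count


def cgroup_auto_workers():
    # cgroup v2: "<quota> <period>" or "max <period>"
    cpu_max = _read("/sys/fs/cgroup/cpu.max")
    if cpu_max:
        quota, _, period = cpu_max.partition(" ")
        if quota != "max" and quota.isdigit() and period.isdigit() and int(period) > 0:
            return max(1, math.ceil(int(quota) / int(period)))

    # cgroup v1: cfs quota / period (quota is -1 when unlimited)
    quota = _read("/sys/fs/cgroup/cpu/cpu.cfs_quota_us")
    period = _read("/sys/fs/cgroup/cpu/cpu.cfs_period_us")
    if quota and period and quota.lstrip("-").isdigit() and int(quota) > 0 and int(period) > 0:
        return max(1, math.ceil(int(quota) / int(period)))

    # cpuset fallback (v2 path first, then v1 path)
    for path in ("/sys/fs/cgroup/cpuset.cpus", "/sys/fs/cgroup/cpuset/cpuset.cpus"):
        spec = _read(path)
        if spec:
            n = _cpuset_count(spec)
            if n > 0:
                return n

    return 1


if __name__ == "__main__":
    print(cgroup_auto_workers())
```
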
- else - NODE <<'NODE' - const fs = require('fs'); - const path = require('path'); - - const outputPath = path.resolve(process.cwd(), 'collection_output.json'); - let discoveredCount = 0; - - try { - const raw = fs.readFileSync(outputPath, 'utf-8'); - if (raw.trim()) { - const data = JSON.parse(raw); - if (data && data.stats && typeof data.stats.tests === 'number') { - discoveredCount = data.stats.tests; - } else if (Array.isArray(data.tests)) { - discoveredCount = data.tests.length; - } - } - } catch (error) { - console.log(`::warning::Unable to parse Mocha discovery JSON for target branch: ${error.message}`); - } - - fs.writeFileSync( - process.env.GITHUB_OUTPUT, - `target_tests_discovered=${discoveredCount} -`, - { flag: 'a' } - ); - NODE - - TEST_COUNT=$(grep -o "target_tests_discovered=[0-9]*" $GITHUB_OUTPUT | tail -1 | cut -d'=' -f2) - if [ -z "$TEST_COUNT" ]; then - TEST_COUNT=0 - fi - - if [ "$TEST_COUNT" = "0" ]; then - NO_TESTS_FOUND="true" - ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="Mocha --dry-run did not discover any tests on the target branch" - echo "::warning::No tests were found in the target branch" - else - echo "Found $TEST_COUNT test(s) in target branch" - fi - fi - - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - - if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - echo "=== FULL COLLECTION OUTPUT ===" >> debug_target_collection.log - cat collection_output.txt >> debug_target_collection.log - working-directory: ${{ inputs['working-directory'] }} - - - name: Run tests on target branch - if: steps.check-collection.outputs.has_collection_errors != 'true' - run: | - echo "Running Mocha tests on target branch..." - MOCHA_COMMAND="${{ inputs['mocha-command'] }}" - MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}" + echo "mocha_exit_code=$MOCHA_EXIT" >> "$GITHUB_OUTPUT" - if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then - MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json" + if [ "$MOCHA_EXIT" -eq 137 ]; then + echo "::warning::Tests were killed (exit 137) - likely OOM. Partial results may be available." fi - TEST_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS" - echo "Executing: $TEST_COMMAND" - - set +e - eval "$TEST_COMMAND" > >(tee target_results.json) 2> target_test_output.txt - EXIT_CODE=$? 
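
The new run step writes Mocha's `--reporter json` output to `results.json`, and the extract step that follows walks that document. For reference, a standalone sketch (outside the workflow) of the same parsing against the reporter's `stats` block and `passes`/`failures`/`pending` arrays; the split of non-assertion errors mirrors the `err.name` check added below, hedged because `err.name` is not always serialized:

```python
#!/usr/bin/env python3
"""Sketch: summarize a Mocha `--reporter json` results file.

Uses the top-level `stats` block and the `passes` / `failures` / `pending`
arrays; the per-entry `err.name` check (when present) mirrors the
error-vs-assertion split done in the workflow's extract step.
"""
import json
import sys


def summarize(path="results.json") -> dict:
    with open(path) as fh:
        data = json.load(fh)

    stats = data.get("stats") or {}

    def titles(key):
        return [t.get("fullTitle") or t.get("title", "") for t in data.get(key) or []]

    passing = titles("passes")
    pending = titles("pending")

    failing, errored = [], []
    for test in data.get("failures") or []:
        name = test.get("fullTitle") or test.get("title", "")
        err = test.get("err") or {}
        # `err.name` is not always serialized; default to a plain failure.
        if err.get("name") and "AssertionError" not in err["name"]:
            errored.append(name)
        else:
            failing.append(name)

    total = stats.get("tests", len(passing) + len(failing) + len(errored) + len(pending))
    passed = stats.get("passes", len(passing))
    pct = (passed / total * 100) if total else 0.0
    return {
        "total": total,
        "passed": passed,
        "percentage": round(pct, 2),
        "failing": failing,
        "errored": errored,
        "pending": pending,
    }


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "results.json"
    print(json.dumps(summarize(path), indent=2))
```
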
- set -e - - if [ -s target_results.json ]; then - echo "✅ Test execution completed (exit code: $EXIT_CODE)" + if [ -s results.json ]; then + echo "Test execution completed (exit code: $MOCHA_EXIT)" else - echo "❌ Test execution did not produce target_results.json (exit code: $EXIT_CODE)" + echo "No results.json - creating empty results file" + echo '{"stats": {"tests": 0, "passes": 0}, "tests": []}' > results.json fi working-directory: ${{ inputs['working-directory'] }} - - name: Extract test results and create artifacts + - name: Extract test results id: extract-results - # Only run if there were no collection errors - if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - echo "Processing test results for target branch: ${{ inputs.target_branch_to_compare }}" - - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_extract_results.log) 2>&1 - node <<'NODE' const fs = require('fs'); const path = require('path'); - const resultsPath = path.resolve(process.cwd(), 'target_results.json'); - let targetTotal = 0; - let targetPassed = 0; - let targetPercentage = 0; + let total = 0; + let passed = 0; + let percentage = 0; const passingTests = []; const failingTests = []; + const errorTests = []; const skippedTests = []; const xfailedTests = []; - const skippedTestsWithReasons = {}; - const xfailedTestsWithReasons = {}; + const xpassedTests = []; const allTests = []; + const skippedWithReasons = {}; + const xfailedWithReasons = {}; const warningsList = []; const safeRead = (filePath) => { @@ -670,7 +352,7 @@ jobs: } }; - const rawResults = safeRead(resultsPath); + const rawResults = safeRead('results.json'); if (rawResults) { try { @@ -678,8 +360,8 @@ jobs: const stats = data?.stats ?? {}; const tests = Array.isArray(data?.tests) ? data.tests : []; - targetTotal = Number.isFinite(stats.tests) ? Number(stats.tests) : tests.length; - targetPassed = Number.isFinite(stats.passes) + total = Number.isFinite(stats.tests) ? Number(stats.tests) : tests.length; + passed = Number.isFinite(stats.passes) ? Number(stats.passes) : Array.isArray(data?.passes) ? data.passes.length @@ -698,11 +380,16 @@ jobs: passingTests.push(identifier); break; case 'failed': - failingTests.push(identifier); + // Check if it's an error vs assertion failure + if (test.err && test.err.name && !test.err.name.includes('AssertionError')) { + errorTests.push(identifier); + } else { + failingTests.push(identifier); + } break; case 'pending': skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = + skippedWithReasons[identifier] = (test.err && test.err.message) || 'Marked as pending in Mocha output'; break; default: { @@ -728,592 +415,88 @@ jobs: const pendingMatch = matchBy(data?.pending); if (pendingMatch) { skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = + skippedWithReasons[identifier] = (pendingMatch.err && pendingMatch.err.message) || 'Marked as pending in Mocha output'; break; } } skippedTests.push(identifier); - skippedTestsWithReasons[identifier] = 'Test state unknown; treated as skipped'; + skippedWithReasons[identifier] = 'Test state unknown; treated as skipped'; break; } } } - if (!targetPassed && passingTests.length) { - targetPassed = passingTests.length; + if (!passed && passingTests.length) { + passed = passingTests.length; } - if (targetTotal > 0) { - targetPercentage = (targetPassed / targetTotal) * 100; - } + percentage = total > 0 ? 
(passed / total) * 100 : 0; + + console.log(`Parsed Mocha results: ${passed}/${total} passed (${percentage.toFixed(2)}%)`); } catch (error) { - console.error(`Failed to parse Mocha JSON results: ${error.message}`); + console.log(`Error parsing results.json: ${error.message}`); } + } else { + console.log('results.json not found. No test data to parse.'); } - const summary = { - total: targetTotal, - passed: targetPassed, - percentage: Number.isFinite(targetPercentage) ? targetPercentage.toFixed(2) : '0.00', - }; + // Extract warnings from test output + const outputContent = safeRead('test_output.txt'); + if (outputContent) { + const warnLines = outputContent + .split('\n') + .map((line) => line.trimEnd()) + .filter((line) => /\bWARN(?:ING)?\b/i.test(line)); + if (warnLines.length) { + warningsList.push(...warnLines.slice(0, 200)); + console.log(`Collected ${warningsList.length} warning line(s) from Mocha output.`); + } + } - const output = { - summary, + // Save artifact data + const testData = { passing_tests: passingTests, failing_tests: failingTests, + error_tests: errorTests, skipped_tests: skippedTests, xfailed_tests: xfailedTests, + xpassed_tests: xpassedTests, all_tests: allTests, - skipped_tests_with_reasons: skippedTestsWithReasons, - xfailed_tests_with_reasons: xfailedTestsWithReasons, + skipped_tests_with_reasons: skippedWithReasons, + xfailed_tests_with_reasons: xfailedWithReasons, warnings: warningsList, }; - fs.writeFileSync('target_test_data.json', JSON.stringify(output, null, 2)); + fs.writeFileSync('test_data.json', JSON.stringify(testData, null, 2)); + + console.log(`Results: ${passed}/${total} (${percentage.toFixed(2)}%)`); const outputLines = [ - `total=${targetTotal}`, - `passed=${targetPassed}`, - `percentage=${Number.isFinite(targetPercentage) ? 
targetPercentage.toFixed(2) : '0.00'}`, - `passing_count=${passingTests.length}`, + `total=${total}`, + `passed=${passed}`, + `percentage=${percentage.toFixed(2)}`, + `failing_count=${failingTests.length}`, + `error_count=${errorTests.length}`, + `skipped_count=${skippedTests.length}`, + `xfailed_count=${xfailedTests.length}`, ]; - fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`); + if (process.env.GITHUB_OUTPUT) { + fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`); + } NODE - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Target branch test results processed: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} tests passed (${{ steps.extract-results.outputs.percentage }}%)" working-directory: ${{ inputs['working-directory'] }} - - name: Upload target branch artifacts + - name: Upload test artifacts if: always() uses: actions/upload-artifact@v4 with: - name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} + name: ${{ inputs.artifact_name }} path: | - ${{ inputs['working-directory'] }}/target_test_data.json - ${{ inputs['working-directory'] }}/target_test_output.txt - ${{ inputs['working-directory'] }}/target_results.json - ${{ inputs['working-directory'] }}/collection_output.json - ${{ inputs['working-directory'] }}/collection_output.txt - ${{ inputs['working-directory'] }}/debug_target_collection.log - ${{ inputs['working-directory'] }}/debug_target_extract_results.log + ${{ inputs['working-directory'] }}/test_data.json + ${{ inputs['working-directory'] }}/test_output.txt + ${{ inputs['working-directory'] }}/results.json retention-days: 3 if-no-files-found: ignore - - # Add a step to set default outputs when collection errors are detected - - name: Set collection error outputs - id: set-error-outputs - if: steps.check-collection.outputs.has_collection_errors == 'true' - run: | - echo "::warning::Setting default outputs for target branch due to collection errors" - echo "total=0" >> $GITHUB_OUTPUT - echo "passed=0" >> $GITHUB_OUTPUT - echo "percentage=0.00" >> $GITHUB_OUTPUT - echo "passing_count=0" >> $GITHUB_OUTPUT - - compare-results: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/regression-test.yml - with: - runs_on: ${{ inputs.runs_on }} - baseline_label: ${{ inputs.target_branch_to_compare }} - baseline_results_artifact: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - baseline_results_filename: target_test_data.json - current_label: ${{ github.head_ref || github.ref_name || 'source branch' }} - current_results_artifact: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - current_results_filename: pr_test_data.json - baseline_passed: ${{ needs.test-target-branch.outputs.passed }} - baseline_total: ${{ needs.test-target-branch.outputs.total }} - baseline_percentage: ${{ needs.test-target-branch.outputs.percentage }} - current_passed: ${{ needs.test-source-branch.outputs.passed }} - current_total: ${{ needs.test-source-branch.outputs.total }} - current_percentage: ${{ needs.test-source-branch.outputs.percentage }} - baseline_collection_errors: ${{ needs.test-target-branch.outputs.collection_errors }} - baseline_no_tests_found: ${{ needs.test-target-branch.outputs.no_tests_found }} - current_collection_errors: ${{ needs.test-source-branch.outputs.collection_errors }} - current_no_tests_found: ${{ needs.test-source-branch.outputs.no_tests_found }} - artifact_name: 
regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - - perform-regression-analysis: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/meta-regression-analysis.yml - with: - item_type_singular: "test" - item_type_plural: "tests" - pr_number: ${{ github.event.pull_request.number }} - run_id: ${{ github.run_id }} - target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - - # Conditionally run notification job only if needed - prepare-notification: - name: Prepare Notification Data - needs: - [ - test-source-branch, - test-target-branch, - compare-results, - perform-regression-analysis, - ] - # Notify on collection errors, no tests found, compare result failure, or if regressions are detected - if: | - always() && - ( - needs.test-source-branch.outputs.collection_errors == 'true' || - needs.test-source-branch.outputs.no_tests_found == 'true' || - needs.compare-results.result == 'failure' || - needs.perform-regression-analysis.outputs.has_regressions == 'true' - ) - runs-on: ${{ fromJSON(inputs.runs_on) }} - outputs: - message_body: ${{ steps.construct_notification.outputs.message_body_out }} - ping_user_ids: ${{ steps.construct_notification.outputs.ping_user_ids_out }} - artifact_path: ${{ steps.construct_notification.outputs.artifact_path_out }} - should_notify: "true" - webhook_available_for_alert: ${{ steps.check_webhook_availability.outputs.webhook_available }} - - steps: - - name: Check for Discord Webhook URL - id: check_webhook_availability - run: | - if [ -z "${{ secrets.DISCORD_WEBHOOK_URL }}" ]; then - echo "::notice::DISCORD_WEBHOOK_URL secret is not set. Discord notifications will likely be skipped by the alert workflow if it relies on this secret." - echo "webhook_available=false" >> $GITHUB_OUTPUT - else - echo "webhook_available=true" >> $GITHUB_OUTPUT - fi - - name: Download regression details (if any) - id: download_regressions - if: always() - uses: actions/download-artifact@v4 - with: - name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - path: . # Download to current directory - continue-on-error: true - - - name: Check downloaded regression file - if: always() - run: | - echo "Checking for regression details file..." 
- if [ -f "regression_details.txt" ]; then - echo "✅ Regression details file found" - echo "File size: $(wc -c < regression_details.txt) bytes" - echo "First few lines:" - head -5 regression_details.txt - else - echo "❌ Regression details file not found" - fi - - if [ -f "comprehensive_regression_report.txt" ]; then - echo "✅ Comprehensive regression report found" - echo "File size: $(wc -c < comprehensive_regression_report.txt) bytes" - else - echo "❌ Comprehensive regression report not found" - fi - - - name: Construct Discord Notification - id: construct_notification - env: - LINT_RESULT: ${{ needs.lint.result }} - SOURCE_TEST_RESULT: ${{ needs.test-source-branch.result }} - TARGET_TEST_RESULT: ${{ needs.test-target-branch.result }} - COMPARE_RESULT: ${{ needs.compare-results.result }} - PR_COLLECTION_ERRORS: ${{ needs.test-source-branch.outputs.collection_errors }} - PR_NO_TESTS_FOUND: ${{ needs.test-source-branch.outputs.no_tests_found }} - PR_ERROR_TYPE: ${{ needs.test-source-branch.outputs.error_type }} - PR_ERROR_DETAILS_TRUNCATED: ${{ needs.test-source-branch.outputs.error_details }} - HAS_REGRESSIONS: ${{ needs.perform-regression-analysis.outputs.has_regressions }} - REGRESSION_COUNT: ${{ needs.perform-regression-analysis.outputs.regression_count }} - PR_TOTAL_TESTS: ${{ needs.test-source-branch.outputs.total }} - PR_PASSED_TESTS: ${{ needs.test-source-branch.outputs.passed }} - PR_PERCENTAGE: ${{ needs.test-source-branch.outputs.percentage }} - TARGET_TOTAL_TESTS: ${{ needs.test-target-branch.outputs.total }} - TARGET_PASSED_TESTS: ${{ needs.test-target-branch.outputs.passed }} - TARGET_PERCENTAGE: ${{ needs.test-target-branch.outputs.percentage }} - PR_NUMBER: ${{ github.event.pull_request.number }} - PR_TITLE: ${{ github.event.pull_request.title }} - PR_URL: ${{ github.event.pull_request.html_url }} - TARGET_BRANCH_NAME: ${{ inputs.target_branch_to_compare }} - PR_BRANCH_NAME: ${{ github.head_ref }} - REPO_URL: ${{ github.server_url }}/${{ github.repository }} - ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - GH_ASSIGNEES_JSON: ${{ toJson(github.event.pull_request.assignees) }} - USER_MAP_JSON: ${{ secrets.DISCORD_USER_MAP || '{}' }} - REGRESSION_FILE_PATH: "regression_details.txt" - DOWNLOAD_REGRESSIONS_OUTCOME: ${{ steps.download_regressions.outcome }} - INPUT_PING_LATEST_COMMITTER: ${{ inputs.ping_latest_committer }} - run: | - # Create debug file for detailed notification construction - exec 3>&1 4>&2 - exec 1> >(tee -a debug_notification_construction.log) 2>&1 - - MESSAGE_LINES=() # Use an array to build message lines - PING_KEYS_OUTPUT="" # Will be comma-separated GitHub logins - ARTIFACT_PATH_OUTPUT="" - - echo "Raw GH_ASSIGNEES_JSON value: [$GH_ASSIGNEES_JSON]" - echo "Raw USER_MAP_JSON value: [$USER_MAP_JSON]" - - # 1. 
Determine Pings - Collect GitHub Logins to pass to alert-discord.yml - # Initialize PING_KEYS_OUTPUT - PING_KEYS_OUTPUT="" - - # Add assignees to PING_KEYS_OUTPUT - if [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && command -v jq &> /dev/null; then - ASSIGNEE_LOGINS_ARRAY=($(echo "$GH_ASSIGNEES_JSON" | jq -r '.[].login // empty')) - echo "GH_ASSIGNEES_JSON received: $GH_ASSIGNEES_JSON" - echo "Extracted ASSIGNEE_LOGINS_ARRAY: (${ASSIGNEE_LOGINS_ARRAY[*]})" - echo "Count of assignees extracted: ${#ASSIGNEE_LOGINS_ARRAY[@]}" - - MAPPED_ASSIGNEE_COUNT=0 - TEMP_PING_KEYS=() - - for assignee_login in "${ASSIGNEE_LOGINS_ARRAY[@]}"; do - if [ -z "$assignee_login" ]; then - echo "Skipping empty assignee login." - continue - fi - echo "Processing assignee for ping: '$assignee_login'" - # Check if this assignee_login exists as a key in USER_MAP_JSON - if echo "$USER_MAP_JSON" | jq -e --arg K "$assignee_login" '.[$K]' > /dev/null; then - echo "Assignee '$assignee_login' FOUND in USER_MAP_JSON." - TEMP_PING_KEYS+=("$assignee_login") - MAPPED_ASSIGNEE_COUNT=$((MAPPED_ASSIGNEE_COUNT + 1)) - else - echo "Assignee '$assignee_login' NOT FOUND in USER_MAP_JSON." - fi - done - - echo "Total assignees found in USER_MAP_JSON and added to pings: $MAPPED_ASSIGNEE_COUNT" - - if [ ${#TEMP_PING_KEYS[@]} -gt 0 ]; then - PING_KEYS_OUTPUT=$(IFS=,; echo "${TEMP_PING_KEYS[*]}") - echo "Initial PING_KEYS_OUTPUT from assignees: [$PING_KEYS_OUTPUT]" - else - echo "No assignees found or GH_ASSIGNEES_JSON was empty, or no assignees were found in USER_MAP_JSON." - fi - elif [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && ! command -v jq &> /dev/null; then - echo "::warning::jq is not available. Cannot determine GitHub users (assignees) for pings." - else - echo "No user map JSON or jq not found. PING_KEYS_OUTPUT (from assignees) will be empty." - fi - - # Add latest committer if INPUT_PING_LATEST_COMMITTER is true - if [[ "$INPUT_PING_LATEST_COMMITTER" == "true" ]]; then - echo "INPUT_PING_LATEST_COMMITTER is true. Attempting to fetch latest committer for PR #${PR_NUMBER}." - if command -v gh &> /dev/null && [ -n "$PR_NUMBER" ]; then - LATEST_COMMITTER_LOGIN_RAW=$(gh pr view "$PR_NUMBER" --json commits --jq '.commits[-1].author.login' 2>/dev/null || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN_RAW" ] && [ "$LATEST_COMMITTER_LOGIN_RAW" != "null" ]; then - # Apply bot filter (e.g., names ending in [bot] or -bot) - LATEST_COMMITTER_LOGIN=$(echo "$LATEST_COMMITTER_LOGIN_RAW" | grep -v -E -i '(\[bot\]$|-bot$)' || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN" ]; then - echo "Latest committer identified: $LATEST_COMMITTER_LOGIN" - - # Check if this committer is already in PING_KEYS_OUTPUT - ALREADY_IN_LIST=0 - if [ -n "$PING_KEYS_OUTPUT" ]; then # Only check if PING_KEYS_OUTPUT is not empty - IFS=',' read -ra PING_ARRAY <<< "$PING_KEYS_OUTPUT" - for key in "${PING_ARRAY[@]}"; do - if [[ "$key" == "$LATEST_COMMITTER_LOGIN" ]]; then - ALREADY_IN_LIST=1 - break - fi - done - fi - - if [[ "$ALREADY_IN_LIST" -eq 0 ]]; then - if [ -z "$PING_KEYS_OUTPUT" ]; then - PING_KEYS_OUTPUT="$LATEST_COMMITTER_LOGIN" - else - PING_KEYS_OUTPUT="$PING_KEYS_OUTPUT,$LATEST_COMMITTER_LOGIN" - fi - echo "Added latest committer '$LATEST_COMMITTER_LOGIN' to PING_KEYS_OUTPUT. New list: [$PING_KEYS_OUTPUT]" - else - echo "Latest committer '$LATEST_COMMITTER_LOGIN' is already in PING_KEYS_OUTPUT (likely an assignee)." 
- fi - else - echo "Latest committer login '$LATEST_COMMITTER_LOGIN_RAW' was filtered out (likely a bot or pattern match) or empty after filter." - fi - else - echo "No latest committer login found for PR #$PR_NUMBER from gh command, or login was null." - fi - else - if ! command -v gh &> /dev/null; then - echo "::warning::gh command not available. Cannot fetch latest committer." - fi - if [ -z "$PR_NUMBER" ]; then - echo "::warning::PR_NUMBER is not set (event might not be a pull_request). Cannot fetch latest committer." - fi - fi - fi - - # Restore stdout/stderr for GitHub Actions to show final summary - exec 1>&3 2>&4 - - # Make this a standard echo for better visibility of the final list - echo "Final Ping Keys Output (GitHub Logins from test-js-mocha.yml): [$PING_KEYS_OUTPUT]" - echo "ping_user_ids_out=$PING_KEYS_OUTPUT" >> $GITHUB_OUTPUT - - # Store branch names in variables with proper quoting - PR_BRANCH="${PR_BRANCH_NAME:-unknown}" - TARGET_BRANCH="${TARGET_BRANCH_NAME:-unknown}" - - # 2. Construct Message Body - MESSAGE_LINES+=("**Mocha Comparison & Regression Analysis for PR [#${PR_NUMBER}: ${PR_TITLE}](${PR_URL})**") - MESSAGE_LINES+=("Branch: [\`${PR_BRANCH}\`](${REPO_URL}/tree/${PR_BRANCH}) against [\`${TARGET_BRANCH}\`](${REPO_URL}/tree/${TARGET_BRANCH})") - MESSAGE_LINES+=("---") - - # Job Status Summary - MESSAGE_LINES+=("**Job Status:**") - LINT_STATUS="Success" - if [[ "$LINT_RESULT" == "failure" ]]; then LINT_STATUS="Failed"; elif [[ "$LINT_RESULT" == "skipped" ]]; then LINT_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Linting: $LINT_STATUS") - - SOURCE_TEST_STATUS="Success" - if [[ "$SOURCE_TEST_RESULT" == "failure" ]]; then SOURCE_TEST_STATUS="Failed"; elif [[ "$SOURCE_TEST_RESULT" == "skipped" ]]; then SOURCE_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- PR Branch Tests (\`${PR_BRANCH}\`): $SOURCE_TEST_STATUS") - - TARGET_TEST_STATUS="Success" - if [[ "$TARGET_TEST_RESULT" == "failure" ]]; then TARGET_TEST_STATUS="Failed"; elif [[ "$TARGET_TEST_RESULT" == "skipped" ]]; then TARGET_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Target Branch Tests (\`${TARGET_BRANCH}\`): $TARGET_TEST_STATUS") - - COMPARE_STATUS="Success" - if [[ "$COMPARE_RESULT" == "failure" ]]; then COMPARE_STATUS="Failed"; elif [[ "$COMPARE_RESULT" == "skipped" ]]; then COMPARE_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Comparison & Regression: $COMPARE_STATUS") - MESSAGE_LINES+=("---") - - # Test Discovery Issues in PR Branch - if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: ERROR: Test Discovery Failed in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Type: \`${PR_ERROR_TYPE}\`") - MESSAGE_LINES+=(" - This usually indicates import errors or syntax issues preventing tests from being collected.") - MESSAGE_LINES+=(" - See attached file for detailed error information.") - elif [[ "$PR_NO_TESTS_FOUND" == "true" ]]; then - MESSAGE_LINES+=("**:warning: WARNING: No Tests Found in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Mocha did not discover any test files matching its patterns.") - MESSAGE_LINES+=(" - Ensure your test files are correctly named (e.g., \`test_*.py\` or \`*_test.py\`) and located.") - fi - - # Regression Analysis Summary - if [[ "$HAS_REGRESSIONS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: REGRESSIONS DETECTED**") - - # Check if we have comprehensive regression file with categories - if [ -f "comprehensive_regression_report.txt" ]; then - # Extract counts from comprehensive report - PASS_FAIL_COUNT=$(grep -o "PASS-TO-FAIL 
REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - PASS_SKIP_COUNT=$(grep -o "PASS-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - PASS_GONE_COUNT=$(grep -o "PASS-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_GONE_COUNT=$(grep -o "FAIL-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - DISCOVERY_COUNT=$(grep -o "DISCOVERY REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - - # Add category summaries (≤5 show paths, >5 show count + refer to file) - if [[ "$PASS_FAIL_COUNT" -gt 0 ]]; then - if [[ "$PASS_FAIL_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Fail ($PASS_FAIL_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-FAIL REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_FAIL_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Fail:** $PASS_FAIL_COUNT tests (see attached file)") - fi - fi - - if [[ "$PASS_SKIP_COUNT" -gt 0 ]]; then - if [[ "$PASS_SKIP_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Skip ($PASS_SKIP_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-SKIP REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_SKIP_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Skip:** $PASS_SKIP_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$PASS_GONE_COUNT" -gt 0 ]]; then - if [[ "$PASS_GONE_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Pass→Gone ($PASS_GONE_COUNT):**") - readarray -t test_paths < <(grep -A 100 "PASS-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_GONE_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Pass→Gone:** $PASS_GONE_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_GONE_COUNT" -gt 0 ]]; then - if [[ "$FAIL_GONE_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Fail→Gone ($FAIL_GONE_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_GONE_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=("**Fail→Gone:** $FAIL_GONE_COUNT tests (see attached file)") - fi - fi - - if [[ "$DISCOVERY_COUNT" -gt 0 ]]; then - if [[ "$DISCOVERY_COUNT" -le 5 ]]; then - MESSAGE_LINES+=("**Discovery Warnings ($DISCOVERY_COUNT):**") - MESSAGE_LINES+=("• $DISCOVERY_COUNT new warnings (see attached file)") - else - MESSAGE_LINES+=("**Discovery Warnings:** $DISCOVERY_COUNT warnings (see attached file)") - fi - fi - - if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then - if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)") - fi - fi - - else - # Fallback to simple regression count - MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.") - fi - elif [[ "$COMPARE_RESULT" == "failure" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - # This case handles general comparison failures NOT due to specific regressions - MESSAGE_LINES+=("**:warning: TEST RESULTS DECLINED**") - MESSAGE_LINES+=(" - The PR branch shows a decrease in test success compared to the target branch, but no specific regressions were identified by the \`meta-regression-analysis\` job.") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - elif [[ "$COMPARE_RESULT" == "success" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - MESSAGE_LINES+=("**:white_check_mark: NO REGRESSIONS DETECTED**") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - fi - - if [[ "$HAS_REGRESSIONS" != "true" ]] && [ -f "comprehensive_regression_report.txt" ]; then - FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 || "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 || "$NEW_TESTS_COUNT" -gt 0 ]]; then - MESSAGE_LINES+=("**:sparkles: Improvements & Additions**") - - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then - if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. 
//') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)") - fi - fi - - if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then - if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then - MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**") - readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. //') - for test_path in "${test_paths[@]}"; do - MESSAGE_LINES+=("• \`$test_path\`") - done - else - MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)") - fi - fi - fi - fi - - MESSAGE_LINES+=("---") - MESSAGE_LINES+=("[View Workflow Run](${ACTION_RUN_URL})") - - # Set artifact path - always prefer comprehensive report if it exists - if [ -f "comprehensive_regression_report.txt" ]; then - ARTIFACT_PATH_OUTPUT="comprehensive_regression_report.txt" - elif [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then - ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH" - else - ARTIFACT_PATH_OUTPUT="" - fi - - # Construct with actual newlines - FINAL_MESSAGE_BODY=$(printf "%s\\n" "${MESSAGE_LINES[@]}") - if [ ${#MESSAGE_LINES[@]} -gt 0 ]; then - # Remove the very last actual newline - FINAL_MESSAGE_BODY="${FINAL_MESSAGE_BODY%\\n}" - fi - - echo "Final message body prepared in test-js-mocha.yml" - - echo "message_body_out<> $GITHUB_OUTPUT - echo "$FINAL_MESSAGE_BODY" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - echo "artifact_path_out=$ARTIFACT_PATH_OUTPUT" >> $GITHUB_OUTPUT - - notify-discord: - name: Send Discord Notification - needs: [prepare-notification] - if: | - always() && - needs.prepare-notification.outputs.should_notify == 'true' && - needs.prepare-notification.outputs.webhook_available_for_alert == 'true' - uses: ./.github/workflows/alert-discord.yml - with: - message_body: ${{ needs.prepare-notification.outputs.message_body }} - ping_user_ids: ${{ needs.prepare-notification.outputs.ping_user_ids }} - artifact_paths: ${{ needs.prepare-notification.outputs.artifact_path }} - should_notify: ${{ needs.prepare-notification.outputs.should_notify }} - runs_on: ${{ inputs.runs_on }} - secrets: - DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} - DISCORD_USER_MAP: ${{ secrets.DISCORD_USER_MAP }} diff --git a/.github/workflows/test-py-bandit.yml b/.github/workflows/test-py-bandit.yml index 82237bb..d0705e4 100644 --- a/.github/workflows/test-py-bandit.yml +++ b/.github/workflows/test-py-bandit.yml @@ -1,130 +1,220 @@ name: Reusable Bandit Security Check with Regression Detection -# This reusable workflow is triggered by other workflows using 'workflow_call' on: workflow_call: inputs: - target_branch_to_compare: - description: "Target branch to compare against (e.g., main)" + ref: + description: "Git ref to checkout and test. Leave empty for default checkout." + required: false + type: string + default: "" + target_branch: + description: "Target branch to compare against for regression detection (e.g., main)" required: true type: string + python-version: + description: "Python version to use for Bandit." + required: false + type: string + default: "3.10" runs_on: + description: "Runner label for the jobs." required: false type: string default: '["ubuntu-latest"]' + artifact_name: + description: "Base name for the security scan artifacts." 
+ required: false + type: string + default: "bandit-results" + severity_level: + description: "Minimum severity level to report (-l, -ll, or -lll). Default -lll (high only)." + required: false + type: string + default: "-lll" outputs: + pr_issues_count: + description: "Number of issues found on PR branch" + value: ${{ jobs.run-bandit-pr.outputs.issues_count }} + target_issues_count: + description: "Number of issues found on target branch" + value: ${{ jobs.run-bandit-target.outputs.issues_count }} + new_issues_count: + description: "Number of new issues introduced in PR" + value: ${{ jobs.compare-results.outputs.new_issues_count }} + resolved_issues_count: + description: "Number of issues resolved in PR" + value: ${{ jobs.compare-results.outputs.resolved_issues_count }} + has_regressions: + description: "Whether new security issues were introduced" + value: ${{ jobs.compare-results.outputs.has_regressions }} bandit_issues_json: description: "JSON output of Bandit issues on PR branch" - value: ${{ jobs.run-bandit.outputs.bandit_issues_json }} + value: ${{ jobs.run-bandit-pr.outputs.bandit_issues_json }} jobs: # Job 1: Run Bandit on the PR branch - run-bandit: - name: Run Bandit on PR Branch & Extract Results + run-bandit-pr: + name: Run Bandit on PR Branch runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: - bandit_issues_json: ${{ steps.extract-pr.outputs.BANDIT_JSON }} + bandit_issues_json: ${{ steps.extract-results.outputs.bandit_json }} + issues_count: ${{ steps.extract-results.outputs.issues_count }} steps: - # Step 1: Checkout the current pull request code - name: Checkout PR Branch - uses: actions/checkout@v4.1.1 + uses: actions/checkout@v4.2.2 with: + submodules: "recursive" + ref: ${{ inputs.ref || github.ref }} persist-credentials: false - # Step 2: Set up Python 3.10 environment - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.3.0 with: - python-version: "3.10" + python-version: "${{ inputs.python-version }}" - # Step 3: Install Bandit (Python security scanner) - name: Install Bandit - run: pip install bandit + run: | + python -m pip install --upgrade pip + pip install bandit + + - name: Run Bandit Security Scan + run: | + bandit -r . ${{ inputs.severity_level }} -f json -o bandit_output.json || true - # Step 4: Run Bandit and output results to a file - - name: Run Bandit on PR Branch + - name: Extract Results + id: extract-results run: | - bandit -r . 
-lll -f json -o pr_bandit_output.json || true + if [ -f bandit_output.json ]; then + ISSUES_JSON=$(cat bandit_output.json | jq -c '.results') + ISSUES_COUNT=$(cat bandit_output.json | jq '.results | length') + else + ISSUES_JSON="[]" + ISSUES_COUNT=0 + fi + echo "bandit_json=$ISSUES_JSON" >> "$GITHUB_OUTPUT" + echo "issues_count=$ISSUES_COUNT" >> "$GITHUB_OUTPUT" + echo "Found $ISSUES_COUNT security issues on PR branch" - # Step 5: Upload the results as a GitHub Actions artifact (for debugging or reporting) - - name: Upload PR Artifact + - name: Upload PR Branch Artifacts uses: actions/upload-artifact@v4 with: - name: pr_bandit_output - path: pr_bandit_output.json - - # Step 6: Extract the raw issue list from the Bandit JSON output - - name: Extract PR Bandit JSON - id: extract-pr - run: | - CONTENT=$(cat pr_bandit_output.json | jq -c '.results') - echo "BANDIT_JSON=$CONTENT" >> $GITHUB_OUTPUT + name: ${{ inputs.artifact_name }}-pr + path: bandit_output.json + retention-days: 3 + if-no-files-found: ignore # Job 2: Run Bandit on the target branch for comparison - run-bandit-on-target: + run-bandit-target: name: Run Bandit on Target Branch runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: - bandit_target_json: ${{ steps.extract-target.outputs.TARGET_JSON }} + bandit_issues_json: ${{ steps.extract-results.outputs.bandit_json }} + issues_count: ${{ steps.extract-results.outputs.issues_count }} steps: - # Step 1: Checkout the base branch (e.g., main) - name: Checkout Target Branch - uses: actions/checkout@v4 + uses: actions/checkout@v4.2.2 with: - ref: ${{ inputs.target_branch_to_compare }} + submodules: "recursive" + ref: ${{ inputs.target_branch }} persist-credentials: false - # Step 2: Set up Python environment - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.3.0 with: - python-version: "3.10" + python-version: "${{ inputs.python-version }}" - # Step 3: Install Bandit - name: Install Bandit - run: pip install bandit - - # Step 4: Run Bandit and save output - - name: Run Bandit on Target Branch run: | - bandit -r . -lll -f json -o target_bandit_output.json || true + python -m pip install --upgrade pip + pip install bandit - # Step 5: Upload results from the target branch - - name: Upload Target Artifact - uses: actions/upload-artifact@v4 - with: - name: target_bandit_output - path: target_bandit_output.json + - name: Run Bandit Security Scan + run: | + bandit -r . 
${{ inputs.severity_level }} -f json -o bandit_output.json || true - # Step 6: Extract raw issue list from the Bandit output - - name: Extract Target Bandit JSON - id: extract-target + - name: Extract Results + id: extract-results run: | - CONTENT=$(cat target_bandit_output.json | jq -c '.results') - echo "TARGET_JSON=$CONTENT" >> $GITHUB_OUTPUT + if [ -f bandit_output.json ]; then + ISSUES_JSON=$(cat bandit_output.json | jq -c '.results') + ISSUES_COUNT=$(cat bandit_output.json | jq '.results | length') + else + ISSUES_JSON="[]" + ISSUES_COUNT=0 + fi + echo "bandit_json=$ISSUES_JSON" >> "$GITHUB_OUTPUT" + echo "issues_count=$ISSUES_COUNT" >> "$GITHUB_OUTPUT" + echo "Found $ISSUES_COUNT security issues on target branch" - # Job 3: Compare the PR results against the target to detect regressions - compare-bandit: - name: Compare Bandit Issues (Regression Analysis) + - name: Upload Target Branch Artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.artifact_name }}-target + path: bandit_output.json + retention-days: 3 + if-no-files-found: ignore + + # Job 3: Compare results and detect regressions + compare-results: + name: Compare Results (Regression Detection) runs-on: ${{ fromJSON(inputs.runs_on) }} - needs: [run-bandit, run-bandit-on-target] + needs: [run-bandit-pr, run-bandit-target] + outputs: + new_issues_count: ${{ steps.compare.outputs.new_issues_count }} + resolved_issues_count: ${{ steps.compare.outputs.resolved_issues_count }} + has_regressions: ${{ steps.compare.outputs.has_regressions }} steps: - - name: Compare JSON + - name: Compare Bandit Results + id: compare run: | echo "Comparing Bandit results between PR and target branch..." - echo "${{ needs.run-bandit.outputs.bandit_issues_json }}" > pr.json - echo "${{ needs.run-bandit-on-target.outputs.bandit_target_json }}" > target.json + # Write issues to files for comparison + echo '${{ needs.run-bandit-pr.outputs.bandit_issues_json }}' > pr_issues.json + echo '${{ needs.run-bandit-target.outputs.bandit_issues_json }}' > target_issues.json - # Compare both JSON lists to find issues present in PR but not in target - NEW_ISSUES=$(jq -n --argfile pr pr.json --argfile base target.json ' - $pr - $base | length') + # Calculate new issues (in PR but not in target) + NEW_ISSUES_COUNT=$(jq -n --argfile pr pr_issues.json --argfile base target_issues.json ' + ($pr - $base) | length') - echo "New security issues introduced: $NEW_ISSUES" + # Calculate resolved issues (in target but not in PR) + RESOLVED_ISSUES_COUNT=$(jq -n --argfile pr pr_issues.json --argfile base target_issues.json ' + ($base - $pr) | length') + + echo "new_issues_count=$NEW_ISSUES_COUNT" >> "$GITHUB_OUTPUT" + echo "resolved_issues_count=$RESOLVED_ISSUES_COUNT" >> "$GITHUB_OUTPUT" + + echo "PR Issues: ${{ needs.run-bandit-pr.outputs.issues_count }}" + echo "Target Issues: ${{ needs.run-bandit-target.outputs.issues_count }}" + echo "New Issues Introduced: $NEW_ISSUES_COUNT" + echo "Issues Resolved: $RESOLVED_ISSUES_COUNT" + + if [ "$NEW_ISSUES_COUNT" -gt 0 ]; then + echo "has_regressions=true" >> "$GITHUB_OUTPUT" + echo "::error::$NEW_ISSUES_COUNT new security issue(s) introduced in this PR" + + # Show details of new issues + echo "New issues details:" + jq -n --argfile pr pr_issues.json --argfile base target_issues.json ' + $pr - $base' | jq -r '.[] | " - \(.test_id): \(.issue_text) (\(.filename):\(.line_number))"' - if [ "$NEW_ISSUES" -gt 0 ]; then - echo "::error::New Bandit issues introduced in PR branch." 
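
The comparison above relies on jq array subtraction over whole Bandit result objects, so a finding only counts as new when no identical object (same `test_id`, `issue_text`, `filename`, `line_number`, and the rest) exists on the target branch; a shifted line number is enough to surface an old finding as new. A standalone sketch of the same set difference in Python, reading the `pr_issues.json` / `target_issues.json` files the step writes:

```python
#!/usr/bin/env python3
"""Sketch: replicate the jq array subtraction used for Bandit regression detection.

Reads pr_issues.json and target_issues.json (each a JSON array of Bandit
`results` objects) and reports findings that are new or resolved. Matching is
whole-object equality, so a shifted line_number makes an old finding look new.
"""
import json


def load(path):
    with open(path) as fh:
        return json.load(fh) or []


def diff_issues(pr_path="pr_issues.json", target_path="target_issues.json"):
    pr, target = load(pr_path), load(target_path)

    # Deep equality on the serialized object, like jq's `$pr - $base`.
    def key(issue):
        return json.dumps(issue, sort_keys=True)

    target_keys = {key(i) for i in target}
    pr_keys = {key(i) for i in pr}

    new = [i for i in pr if key(i) not in target_keys]
    resolved = [i for i in target if key(i) not in pr_keys]
    return new, resolved


if __name__ == "__main__":
    new, resolved = diff_issues()
    print(f"new={len(new)} resolved={len(resolved)}")
    for issue in new:
        print(f"  - {issue.get('test_id')}: {issue.get('issue_text')} "
              f"({issue.get('filename')}:{issue.get('line_number')})")
```
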
exit 1 else + echo "has_regressions=false" >> "$GITHUB_OUTPUT" echo "No new security issues introduced." + if [ "$RESOLVED_ISSUES_COUNT" -gt 0 ]; then + echo "::notice::$RESOLVED_ISSUES_COUNT security issue(s) were resolved in this PR" + fi fi + + - name: Upload Comparison Artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.artifact_name }}-comparison + path: | + pr_issues.json + target_issues.json + retention-days: 3 + if-no-files-found: ignore diff --git a/.github/workflows/test-rs-cargo.yml b/.github/workflows/test-rs-cargo.yml new file mode 100644 index 0000000..084046a --- /dev/null +++ b/.github/workflows/test-rs-cargo.yml @@ -0,0 +1,504 @@ +name: Reusable Cargo Test Runner + +on: + workflow_call: + inputs: + ref: + description: "Git ref to checkout and test. Leave empty for default checkout." + required: false + type: string + default: "" + rust-version: + description: "Rust toolchain version to use for testing." + required: false + type: string + default: "stable" + runs_on: + description: "Runner label for the test job." + required: false + type: string + default: '["self-hosted", "multithreaded"]' + artifact_name: + description: "Name for the test results artifact." + required: true + type: string + parallel_workers: + description: "Number of parallel test threads. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or a number." + required: false + type: string + default: "" + working_directory: + description: "Working directory for cargo commands (where Cargo.toml is located)." + required: false + type: string + default: "." + cargo_test_args: + description: "Additional arguments to pass to cargo test." + required: false + type: string + default: "" + outputs: + total: + description: "Total number of tests" + value: ${{ jobs.test.outputs.total }} + passed: + description: "Number of passing tests" + value: ${{ jobs.test.outputs.passed }} + percentage: + description: "Pass percentage" + value: ${{ jobs.test.outputs.percentage }} + collection_errors: + description: "Whether compilation/collection errors occurred" + value: ${{ jobs.test.outputs.collection_errors }} + no_tests_found: + description: "Whether no tests were found" + value: ${{ jobs.test.outputs.no_tests_found }} + has_errors: + description: "Whether any errors occurred" + value: ${{ jobs.test.outputs.has_errors }} + error_type: + description: "Type of error if any" + value: ${{ jobs.test.outputs.error_type }} + failing_count: + description: "Number of failing tests" + value: ${{ jobs.test.outputs.failing_count }} + error_count: + description: "Number of errored tests" + value: ${{ jobs.test.outputs.error_count }} + skipped_count: + description: "Number of ignored tests" + value: ${{ jobs.test.outputs.skipped_count }} + xfailed_count: + description: "Number of should_panic tests (analogous to xfail)" + value: ${{ jobs.test.outputs.xfailed_count }} + +jobs: + test: + runs-on: ${{ fromJSON(inputs.runs_on) }} + outputs: + total: ${{ steps.extract-results.outputs.total }} + passed: ${{ steps.extract-results.outputs.passed }} + percentage: ${{ steps.extract-results.outputs.percentage }} + collection_errors: ${{ steps.check-compilation.outputs.has_collection_errors }} + no_tests_found: ${{ steps.check-compilation.outputs.no_tests_found }} + has_errors: ${{ steps.check-compilation.outputs.has_errors }} + error_type: ${{ steps.check-compilation.outputs.error_type }} + failing_count: ${{ steps.extract-results.outputs.failing_count 
}} + error_count: ${{ steps.extract-results.outputs.error_count }} + skipped_count: ${{ steps.extract-results.outputs.skipped_count }} + xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} + + steps: + - name: Checkout + uses: actions/checkout@v4.2.2 + with: + submodules: "recursive" + ref: ${{ inputs.ref || github.ref }} + + - name: Set up Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ inputs.rust-version }} + + - name: Cache cargo registry and build + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + ${{ inputs.working_directory }}/target + key: cargo-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + cargo-${{ runner.os }}- + + - name: Check for compilation errors + id: check-compilation + working-directory: ${{ inputs.working_directory }} + run: | + echo "Running cargo check to detect compilation errors..." + + HAS_COLLECTION_ERRORS="false" + NO_TESTS_FOUND="false" + ERROR_TYPE="none" + + # Try to compile the test binaries + if ! cargo test --no-run 2>&1 | tee compilation_output.txt; then + echo "::error::Compilation errors detected" + HAS_COLLECTION_ERRORS="true" + + if grep -q "error\[E" compilation_output.txt; then + # Extract the first error code + ERROR_CODE=$(grep -o "error\[E[0-9]*\]" compilation_output.txt | head -1 || echo "CompilationError") + ERROR_TYPE="$ERROR_CODE" + else + ERROR_TYPE="CompilationError" + fi + else + # Count tests by doing a dry run + TEST_COUNT=$(cargo test --no-run 2>&1 | grep -c "Compiling\|Running" || echo "0") + + # Better detection: actually list tests + cargo test -- --list 2>&1 | tee test_list.txt || true + TEST_COUNT=$(grep -c ": test$" test_list.txt || echo "0") + + if [[ "$TEST_COUNT" == "0" ]]; then + echo "::warning::No tests were found" + NO_TESTS_FOUND="true" + ERROR_TYPE="NoTestsFound" + else + echo "Found $TEST_COUNT tests" + fi + fi + + echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> "$GITHUB_OUTPUT" + echo "no_tests_found=$NO_TESTS_FOUND" >> "$GITHUB_OUTPUT" + echo "error_type=$ERROR_TYPE" >> "$GITHUB_OUTPUT" + if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then + echo "has_errors=true" >> "$GITHUB_OUTPUT" + else + echo "has_errors=false" >> "$GITHUB_OUTPUT" + fi + + - name: Run tests + id: run-tests + continue-on-error: true + if: steps.check-compilation.outputs.has_collection_errors != 'true' + working-directory: ${{ inputs.working_directory }} + run: | + set -euo pipefail + + cgroup_auto_workers() { + local n="" + + # cgroup v2: /sys/fs/cgroup/cpu.max => " " or "max " + if [ -f /sys/fs/cgroup/cpu.max ]; then + local quota period + quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" + period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" + if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + + # cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us + if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + local quota period + quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" + period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" + if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + + # cpuset fallback (v2: /sys/fs/cgroup/cpuset.cpus ; v1: /sys/fs/cgroup/cpuset/cpuset.cpus) + if [ -z "$n" ]; then + local f="" + if [ -f /sys/fs/cgroup/cpuset.cpus ]; then + 
f="/sys/fs/cgroup/cpuset.cpus" + elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset/cpuset.cpus" + fi + + if [ -n "$f" ]; then + local spec + spec="$(cat "$f" | tr -d '[:space:]')" + if [ -n "$spec" ]; then + local count=0 + IFS=',' read -r -a parts <<< "$spec" + for p in "${parts[@]}"; do + if [[ "$p" == *-* ]]; then + local a="${p%%-*}" + local b="${p##*-}" + if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then + count=$((count + b - a + 1)) + fi + elif [[ "$p" =~ ^[0-9]+$ ]]; then + count=$((count + 1)) + fi + done + if [ "$count" -gt 0 ]; then + n="$count" + fi + fi + fi + fi + + if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then + n="1" + fi + + echo "$n" + } + + WORKERS="${{ inputs.parallel_workers }}" + if [ -z "$WORKERS" ]; then + if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then + WORKERS="6" + else + WORKERS="1" + fi + elif [ "$WORKERS" = "auto" ]; then + WORKERS="$(cgroup_auto_workers)" + fi + + echo "Running tests with $WORKERS threads..." + + # Set test threads for cargo + export RUST_TEST_THREADS=$WORKERS + + # Run cargo test with JSON output (unstable feature via nightly or cargo-nextest) + set +e + cargo test ${{ inputs.cargo_test_args }} -- --format json -Z unstable-options 2>&1 | tee test_output_raw.json + CARGO_EXIT=$? + set -e + + # If JSON output failed (stable toolchain), fall back to parsing standard output + if ! grep -q '"type":' test_output_raw.json 2>/dev/null; then + echo "JSON output not available, falling back to text parsing..." + set +e + cargo test ${{ inputs.cargo_test_args }} 2>&1 | tee test_output.txt + CARGO_EXIT=$? + set -e + + # Create a simple results format from text output + echo "{\"format\": \"text\", \"exit_code\": $CARGO_EXIT}" > test_output_raw.json + else + # Save raw JSON and copy to text output + cp test_output_raw.json test_output.txt + fi + + echo "cargo_exit_code=$CARGO_EXIT" >> "$GITHUB_OUTPUT" + + if [ "$CARGO_EXIT" -eq 137 ]; then + echo "::warning::Tests were killed (exit 137) - likely OOM. Partial results may be available." 
+ fi + + echo "Test execution completed (exit code: $CARGO_EXIT)" + + - name: Extract test results + id: extract-results + working-directory: ${{ inputs.working_directory }} + run: | + python3 -c " + import json + import os + import re + + total = passed = 0 + percentage = 0.0 + passing_tests = [] + failing_tests = [] + error_tests = [] + skipped_tests = [] # Rust 'ignored' tests + xfailed_tests = [] # should_panic tests that passed + xpassed_tests = [] + all_tests = [] + skipped_with_reasons = {} + xfailed_with_reasons = {} + warnings_list = [] + + def parse_json_output(): + global total, passed, passing_tests, failing_tests, error_tests, skipped_tests, all_tests + + try: + with open('test_output_raw.json', 'r') as f: + content = f.read() + + # cargo test JSON output is newline-delimited JSON + for line in content.strip().split('\n'): + if not line.strip(): + continue + try: + event = json.loads(line) + + if event.get('type') == 'test': + name = event.get('name', '') + if not name: + continue + + if event.get('event') == 'started': + all_tests.append(name) + elif event.get('event') == 'ok': + passing_tests.append(name) + passed += 1 + total += 1 + elif event.get('event') == 'failed': + failing_tests.append(name) + total += 1 + elif event.get('event') == 'ignored': + skipped_tests.append(name) + reason = event.get('reason', 'No reason') + skipped_with_reasons[name] = reason + total += 1 + + elif event.get('type') == 'suite': + if event.get('event') == 'started': + # Reset for new suite if needed + pass + except json.JSONDecodeError: + continue + + return True + except FileNotFoundError: + return False + except Exception as e: + print(f'JSON parsing error: {e}') + return False + + def parse_text_output(): + global total, passed, passing_tests, failing_tests, error_tests, skipped_tests, all_tests, skipped_with_reasons + + try: + with open('test_output.txt', 'r') as f: + content = f.read() + + # Parse text output from cargo test + # Example: 'test tests::test_name ... ok' + # Example: 'test tests::test_name ... FAILED' + # Example: 'test tests::test_name ... ignored' + + test_pattern = re.compile(r'^test\s+(\S+)\s+\.\.\.\s+(\w+)', re.MULTILINE) + + for match in test_pattern.finditer(content): + name = match.group(1) + result = match.group(2).lower() + + all_tests.append(name) + total += 1 + + if result == 'ok': + passing_tests.append(name) + passed += 1 + elif result == 'failed': + failing_tests.append(name) + elif result == 'ignored': + skipped_tests.append(name) + skipped_with_reasons[name] = 'Test marked with #[ignore]' + + # Also parse summary line: 'test result: ok. 
5 passed; 0 failed; 1 ignored' + summary_pattern = re.compile(r'test result:.*?(\d+)\s+passed;\s*(\d+)\s+failed;\s*(\d+)\s+ignored') + summary_match = summary_pattern.search(content) + + if summary_match and total == 0: + passed = int(summary_match.group(1)) + failed = int(summary_match.group(2)) + ignored = int(summary_match.group(3)) + total = passed + failed + ignored + + return True + except FileNotFoundError: + return False + except Exception as e: + print(f'Text parsing error: {e}') + return False + + # Try JSON first, fall back to text + if not parse_json_output(): + parse_text_output() + + # Calculate percentage + percentage = (passed / total * 100) if total > 0 else 0 + + # Extract warnings from compilation output + try: + with open('compilation_output.txt', 'r') as f: + content = f.read() + + warning_pattern = re.compile(r'^warning:.*$', re.MULTILINE) + for match in warning_pattern.finditer(content): + warnings_list.append(match.group(0)) + except: + pass + + # Save artifact data in same format as pytest + with open('test_data.json', 'w') as f: + json.dump({ + 'passing_tests': passing_tests, + 'failing_tests': failing_tests, + 'error_tests': error_tests, + 'skipped_tests': skipped_tests, + 'xfailed_tests': xfailed_tests, + 'xpassed_tests': xpassed_tests, + 'all_tests': all_tests, + 'skipped_tests_with_reasons': skipped_with_reasons, + 'xfailed_tests_with_reasons': xfailed_with_reasons, + 'warnings': warnings_list + }, f, indent=2) + + print(f'Results: {passed}/{total} ({percentage:.1f}%)') + + with open(os.environ['GITHUB_OUTPUT'], 'a') as f: + f.write(f'total={total}\n') + f.write(f'passed={passed}\n') + f.write(f'percentage={percentage:.2f}\n') + f.write(f'failing_count={len(failing_tests)}\n') + f.write(f'error_count={len(error_tests)}\n') + f.write(f'skipped_count={len(skipped_tests)}\n') + f.write(f'xfailed_count={len(xfailed_tests)}\n') + f.write(f'xpassed_count={len(xpassed_tests)}\n') + " + + - name: Create results.json for compatibility + if: always() + working-directory: ${{ inputs.working_directory }} + run: | + # Create a results.json file similar to pytest-json-report for compatibility + python3 -c " + import json + import os + + try: + with open('test_data.json', 'r') as f: + data = json.load(f) + + # Convert to pytest-like format + tests = [] + for name in data.get('passing_tests', []): + tests.append({'nodeid': name, 'outcome': 'passed'}) + for name in data.get('failing_tests', []): + tests.append({'nodeid': name, 'outcome': 'failed'}) + for name in data.get('error_tests', []): + tests.append({'nodeid': name, 'outcome': 'error'}) + for name in data.get('skipped_tests', []): + reason = data.get('skipped_tests_with_reasons', {}).get(name, 'No reason') + tests.append({'nodeid': name, 'outcome': 'skipped', 'longrepr': reason}) + for name in data.get('xfailed_tests', []): + reason = data.get('xfailed_tests_with_reasons', {}).get(name, 'No reason') + tests.append({'nodeid': name, 'outcome': 'xfailed', 'longrepr': reason}) + for name in data.get('xpassed_tests', []): + tests.append({'nodeid': name, 'outcome': 'xpassed'}) + + total = len(data.get('all_tests', [])) + passed = len(data.get('passing_tests', [])) + + results = { + 'exitcode': 0 if len(data.get('failing_tests', [])) == 0 else 1, + 'summary': { + 'total': total, + 'passed': passed + }, + 'tests': tests + } + + with open('results.json', 'w') as f: + json.dump(results, f, indent=2) + + except Exception as e: + print(f'Error creating results.json: {e}') + # Create minimal results file + with open('results.json', 
'w') as f: + json.dump({'exitcode': 1, 'summary': {'total': 0, 'passed': 0}, 'tests': []}, f) + " + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.artifact_name }} + path: | + ${{ inputs.working_directory }}/test_data.json + ${{ inputs.working_directory }}/test_output.txt + ${{ inputs.working_directory }}/results.json + ${{ inputs.working_directory }}/compilation_output.txt + retention-days: 3 + if-no-files-found: ignore diff --git a/.github/workflows/test-storybook.yml b/.github/workflows/test-storybook.yml index 44c280b..2de12bf 100644 --- a/.github/workflows/test-storybook.yml +++ b/.github/workflows/test-storybook.yml @@ -12,23 +12,39 @@ on: description: "Port for Storybook server" required: false type: string - default: "3001" + default: "6006" target_branch_to_compare: description: "The target branch to compare against for regressions (e.g., main). If empty, regression check is skipped." required: false type: string default: "" runs_on: + description: "Runner label for the test jobs." required: false type: string default: '["self-hosted", "multithreaded"]' + parallel_workers: + description: "Number of parallel workers for Storybook tests. Leave empty for runner default (6 for multithreaded, 1 for singlethreaded). Use 'auto' for cgroup-aware CPU count, or a number." + required: false + type: string + default: "" + storybook_start_command: + description: "Command to start Storybook server." + required: false + type: string + default: "npm run storybook" + storybook_test_command: + description: "Command to run Storybook tests." + required: false + type: string + default: "npm run storybook-test" outputs: pr_has_errors: description: "Boolean indicating if the PR branch has Storybook test errors." value: ${{ jobs.test-pr-branch-storybook.outputs.has_errors }} pr_failing_stories_json: description: "JSON list of failing story IDs on the PR branch." - value: ${{ jobs.test-pr-branch-storybook.outputs.failing_stories_json }} + value: ${{ jobs.test-pr-branch-storybook.outputs.failing_items_json }} has_regressions: description: "Boolean indicating if Storybook test regressions were found." 
value: ${{ jobs.compare-results.outputs.has_regressions }} @@ -37,52 +53,187 @@ on: value: ${{ jobs.compare-results.outputs.regression_count }} jobs: - lint: - uses: ./.github/workflows/test-lint-js.yml # Assumes JS/TS linting for Storybook setup - permissions: - contents: write - test-target-branch-storybook: if: ${{ inputs.target_branch_to_compare != '' }} name: Test Target Branch Stories - needs: [lint] # Ensure linting passes before running tests runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: - total: ${{ steps.normalise-target.outputs.total }} - passed: ${{ steps.normalise-target.outputs.passed }} - percentage: ${{ steps.normalise-target.outputs.percentage }} - passing_stories_json: ${{ steps.normalise-target.outputs.passing_items_json }} - collection_errors: ${{ steps.normalise-target.outputs.collection_errors }} - no_tests_found: ${{ steps.normalise-target.outputs.no_tests_found }} + total: ${{ steps.results.outputs.total }} + passed: ${{ steps.results.outputs.passed }} + percentage: ${{ steps.results.outputs.percentage }} + passing_stories_json: ${{ steps.results.outputs.passing_items_json }} + collection_errors: ${{ steps.results.outputs.collection_errors }} + no_tests_found: ${{ steps.results.outputs.no_tests_found }} + has_errors: ${{ steps.results.outputs.has_errors }} + error_type: ${{ steps.results.outputs.error_type }} + failing_count: ${{ steps.results.outputs.failing_count }} + error_count: ${{ steps.results.outputs.error_count }} + skipped_count: ${{ steps.results.outputs.skipped_count }} + xfailed_count: ${{ steps.results.outputs.xfailed_count }} + steps: + # Smart caching for target branch results + - name: Set cache keys + id: cache-keys + run: | + CACHE_VERSION="v1" + BASE_KEY="storybook-${CACHE_VERSION}-${{ inputs.target_branch_to_compare }}-${{ github.event.pull_request.base.sha || github.sha }}" + echo "base_key=$BASE_KEY" >> $GITHUB_OUTPUT + echo "pending_key=${BASE_KEY}-pending-${{ github.run_id }}" >> $GITHUB_OUTPUT + echo "Cache base key: $BASE_KEY" + + - name: Check for complete cache + id: cache-complete + uses: actions/cache/restore@v4 + with: + path: cached_target + key: ${{ steps.cache-keys.outputs.base_key }} + + - name: Check for pending cache + id: cache-pending + if: steps.cache-complete.outputs.cache-hit != 'true' + uses: actions/cache/restore@v4 + with: + path: cached_pending + key: ${{ steps.cache-keys.outputs.base_key }}-pending-impossible-match + restore-keys: | + ${{ steps.cache-keys.outputs.base_key }}-pending- + + - name: Determine initial status + id: initial-status + run: | + if [ "${{ steps.cache-complete.outputs.cache-hit }}" == "true" ]; then + echo "status=complete" >> $GITHUB_OUTPUT + echo "Found complete cache - will use it" + elif [ "${{ steps.cache-pending.outputs.cache-hit }}" == "true" ]; then + echo "status=pending" >> $GITHUB_OUTPUT + echo "Found pending cache - another job is running, will poll" + else + echo "status=miss" >> $GITHUB_OUTPUT + echo "No cache found - will run tests" + fi + + - name: Create pending marker + if: steps.initial-status.outputs.status == 'miss' + run: | + mkdir -p cached_pending_marker + echo "pending" > cached_pending_marker/status + echo "started=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> cached_pending_marker/status + echo "run_id=${{ github.run_id }}" >> cached_pending_marker/status + + - name: Save pending marker + if: steps.initial-status.outputs.status == 'miss' + uses: actions/cache/save@v4 + with: + path: cached_pending_marker + key: ${{ steps.cache-keys.outputs.pending_key }} + + - name: Poll for 
complete cache + id: poll-cache + if: steps.initial-status.outputs.status == 'pending' + env: + GH_TOKEN: ${{ github.token }} + run: | + echo "Another job is running tests, polling for results..." + TOTAL_WAIT=0 + MAX_WAIT=1200 + DELAY=5 + CACHE_KEY="${{ steps.cache-keys.outputs.base_key }}" + + while [ $TOTAL_WAIT -lt $MAX_WAIT ]; do + echo "Waiting ${DELAY}s... (${TOTAL_WAIT}s / ${MAX_WAIT}s elapsed)" + sleep $DELAY + TOTAL_WAIT=$((TOTAL_WAIT + DELAY)) + + CACHE_CHECK=$(gh cache list --key "$CACHE_KEY" --limit 1 2>/dev/null || echo "") + if echo "$CACHE_CHECK" | grep -q "$CACHE_KEY"; then + echo "Complete cache is now available!" + echo "found=true" >> $GITHUB_OUTPUT + break + fi + + DELAY=$((DELAY * 2)) + if [ $DELAY -gt 60 ]; then + DELAY=60 + fi + done + + if [ $TOTAL_WAIT -ge $MAX_WAIT ]; then + echo "Timeout after ${MAX_WAIT}s - will run tests ourselves" + echo "found=false" >> $GITHUB_OUTPUT + fi + + - name: Restore cache after poll + id: cache-after-poll + if: steps.poll-cache.outputs.found == 'true' + uses: actions/cache/restore@v4 + with: + path: cached_target + key: ${{ steps.cache-keys.outputs.base_key }} + + - name: Determine final status + id: final-status + run: | + if [ "${{ steps.cache-complete.outputs.cache-hit }}" == "true" ]; then + echo "cache_hit=true" >> $GITHUB_OUTPUT + elif [ "${{ steps.cache-after-poll.outputs.cache-hit }}" == "true" ]; then + echo "cache_hit=true" >> $GITHUB_OUTPUT + else + echo "cache_hit=false" >> $GITHUB_OUTPUT + fi + + - name: Load cached results + id: load-cache + if: steps.final-status.outputs.cache_hit == 'true' + run: | + echo "Loading cached target results" + if [ -f cached_target/outputs.env ]; then + cat cached_target/outputs.env >> $GITHUB_OUTPUT + fi + + - name: Upload cached artifact + if: steps.final-status.outputs.cache_hit == 'true' + uses: actions/upload-artifact@v4 + with: + name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} + path: cached_target/test_data.json + if-no-files-found: ignore + + # === Only run tests if no usable cache === - name: Checkout Target Branch + if: steps.final-status.outputs.cache_hit != 'true' uses: actions/checkout@v4.2.2 with: ref: ${{ inputs.target_branch_to_compare }} submodules: "recursive" - name: Use Node.js ${{ inputs.node-version }} + if: steps.final-status.outputs.cache_hit != 'true' uses: actions/setup-node@v4 with: node-version: ${{ inputs.node-version }} cache: "npm" - name: Install dependencies (Target) + if: steps.final-status.outputs.cache_hit != 'true' run: npm ci - name: Install Playwright browsers (Target) + if: steps.final-status.outputs.cache_hit != 'true' run: npx playwright install --with-deps - name: Run Storybook (Target) - run: npm run storybook -- --port ${{ inputs.storybook_port }} & + if: steps.final-status.outputs.cache_hit != 'true' + run: ${{ inputs.storybook_start_command }} -- --port ${{ inputs.storybook_port }} & - name: Wait for Storybook (Target) + if: steps.final-status.outputs.cache_hit != 'true' run: | echo "Waiting for Storybook (Target) to start on port ${{ inputs.storybook_port }}..." 
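
The polling step above covers the case where a sibling job already holds the pending marker for the same target commit (the completed key is built from the target branch plus the PR base SHA, so any job testing that commit can reuse the result). The loop sleeps 5 s, doubles the delay up to a 60 s cap, and gives up after 20 minutes, after which this job runs the tests itself. A small Python sketch of that backoff schedule, with cache_exists standing in for the `gh cache list --key` probe (hypothetical callable, not part of the workflow):

import time
from typing import Callable


def poll_for_cache(cache_exists: Callable[[], bool],
                   max_wait: int = 1200,
                   initial_delay: int = 5,
                   max_delay: int = 60) -> bool:
    """Wait for another job's completed cache; True if it appeared in time."""
    waited, delay = 0, initial_delay
    while waited < max_wait:
        time.sleep(delay)
        waited += delay
        if cache_exists():                 # in the workflow: gh cache list --key "$CACHE_KEY"
            return True
        delay = min(delay * 2, max_delay)  # 5s, 10s, 20s, 40s, 60s, 60s, ...
    return False                           # timed out -> this job runs the tests itself
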
- timeout=120 # Increased timeout + timeout=120 counter=0 until $(curl --output /dev/null --silent --head --fail http://localhost:${{ inputs.storybook_port }}); do - if [ $counter -ge $timeout ]; then # Use -ge for counter check + if [ $counter -ge $timeout ]; then echo "Timed out waiting for Storybook (Target) to start" exit 1 fi @@ -94,26 +245,133 @@ jobs: - name: Run Storybook tests (Target) id: run-tests-target + if: steps.final-status.outputs.cache_hit != 'true' run: | - npm run storybook-test -- --json-report target_storybook_results.json || true - echo "Storybook tests on target branch completed." - if [ -f target_storybook_results.json ]; then - cat target_storybook_results.json - else - echo "target_storybook_results.json not found." - echo "{\"testResults\": [], \"numTotalTests\": 0, \"numPassedTests\": 0}" > target_storybook_results.json # Create empty results + set -euo pipefail + + cgroup_auto_workers() { + local n="" + if [ -f /sys/fs/cgroup/cpu.max ]; then + local quota period + quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" + period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" + if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + local quota period + quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" + period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" + if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + if [ -z "$n" ]; then + local f="" + if [ -f /sys/fs/cgroup/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset.cpus" + elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset/cpuset.cpus" + fi + if [ -n "$f" ]; then + local spec + spec="$(cat "$f" | tr -d '[:space:]')" + if [ -n "$spec" ]; then + local count=0 + IFS=',' read -r -a parts <<< "$spec" + for p in "${parts[@]}"; do + if [[ "$p" == *-* ]]; then + local a="${p%%-*}" + local b="${p##*-}" + if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then + count=$((count + b - a + 1)) + fi + elif [[ "$p" =~ ^[0-9]+$ ]]; then + count=$((count + 1)) + fi + done + if [ "$count" -gt 0 ]; then + n="$count" + fi + fi + fi + fi + if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then + n="1" + fi + echo "$n" + } + + WORKERS="${{ inputs.parallel_workers }}" + if [ -z "$WORKERS" ]; then + if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then + WORKERS="6" + else + WORKERS="1" + fi + elif [ "$WORKERS" = "auto" ]; then + WORKERS="$(cgroup_auto_workers)" + fi + + echo "Running Storybook tests with $WORKERS workers..." + + WORKER_FLAGS="" + if [ "$WORKERS" != "1" ]; then + WORKER_FLAGS="--maxWorkers=$WORKERS" + fi + + set +e + ${{ inputs.storybook_test_command }} -- --url http://localhost:${{ inputs.storybook_port }} $WORKER_FLAGS --json --outputFile=target_storybook_results.json 2>&1 | tee test_output.txt + TEST_EXIT=$? + set -e + + echo "Storybook tests on target branch completed (exit code: $TEST_EXIT)." + if [ ! 
-f target_storybook_results.json ]; then + echo '{"testResults": [], "numTotalTests": 0, "numPassedTests": 0}' > target_storybook_results.json fi - name: Normalise Storybook results (Target) id: normalise-target + if: steps.final-status.outputs.cache_hit != 'true' run: | - python "$GITHUB_WORKSPACE/.github/scripts/storybook_results_to_standard_json.py" \ + python3 "$GITHUB_WORKSPACE/.github/scripts/storybook_results_to_standard_json.py" \ target_storybook_results.json \ target_test_data.json \ --github-output "$GITHUB_OUTPUT" + - name: Save results to cache + if: steps.final-status.outputs.cache_hit != 'true' + run: | + mkdir -p cached_target + [ -f target_test_data.json ] && cp target_test_data.json cached_target/test_data.json + [ -f target_storybook_results.json ] && cp target_storybook_results.json cached_target/ + echo "complete" > cached_target/status + cat > cached_target/outputs.env << EOF + total=${{ steps.normalise-target.outputs.total || '0' }} + passed=${{ steps.normalise-target.outputs.passed || '0' }} + percentage=${{ steps.normalise-target.outputs.percentage || '0.00' }} + collection_errors=${{ steps.normalise-target.outputs.collection_errors || 'false' }} + no_tests_found=${{ steps.normalise-target.outputs.no_tests_found || 'false' }} + has_errors=${{ steps.normalise-target.outputs.has_failures || 'false' }} + error_type=none + failing_count=${{ steps.normalise-target.outputs.failed || '0' }} + error_count=0 + skipped_count=${{ steps.normalise-target.outputs.skipped || '0' }} + xfailed_count=${{ steps.normalise-target.outputs.xfailed || '0' }} + passing_items_json=${{ steps.normalise-target.outputs.passing_items_json || '[]' }} + EOF + sed -i 's/^[[:space:]]*//' cached_target/outputs.env + + - name: Upload to cache + if: steps.final-status.outputs.cache_hit != 'true' + uses: actions/cache/save@v4 + with: + path: cached_target + key: ${{ steps.cache-keys.outputs.base_key }} + - name: Upload target branch artifacts - if: always() + if: steps.final-status.outputs.cache_hit != 'true' uses: actions/upload-artifact@v4 with: name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} @@ -123,18 +381,53 @@ jobs: retention-days: 3 if-no-files-found: ignore + - name: Set final outputs + id: results + run: | + if [ "${{ steps.final-status.outputs.cache_hit }}" == "true" ]; then + echo "total=${{ steps.load-cache.outputs.total || '0' }}" >> $GITHUB_OUTPUT + echo "passed=${{ steps.load-cache.outputs.passed || '0' }}" >> $GITHUB_OUTPUT + echo "percentage=${{ steps.load-cache.outputs.percentage || '0.00' }}" >> $GITHUB_OUTPUT + echo "collection_errors=${{ steps.load-cache.outputs.collection_errors || 'false' }}" >> $GITHUB_OUTPUT + echo "no_tests_found=${{ steps.load-cache.outputs.no_tests_found || 'false' }}" >> $GITHUB_OUTPUT + echo "has_errors=${{ steps.load-cache.outputs.has_errors || 'false' }}" >> $GITHUB_OUTPUT + echo "error_type=${{ steps.load-cache.outputs.error_type || 'none' }}" >> $GITHUB_OUTPUT + echo "failing_count=${{ steps.load-cache.outputs.failing_count || '0' }}" >> $GITHUB_OUTPUT + echo "error_count=${{ steps.load-cache.outputs.error_count || '0' }}" >> $GITHUB_OUTPUT + echo "skipped_count=${{ steps.load-cache.outputs.skipped_count || '0' }}" >> $GITHUB_OUTPUT + echo "xfailed_count=${{ steps.load-cache.outputs.xfailed_count || '0' }}" >> $GITHUB_OUTPUT + echo "passing_items_json=${{ steps.load-cache.outputs.passing_items_json || '[]' }}" >> $GITHUB_OUTPUT + else + echo "total=${{ steps.normalise-target.outputs.total || '0' }}" >> $GITHUB_OUTPUT + 
echo "passed=${{ steps.normalise-target.outputs.passed || '0' }}" >> $GITHUB_OUTPUT + echo "percentage=${{ steps.normalise-target.outputs.percentage || '0.00' }}" >> $GITHUB_OUTPUT + echo "collection_errors=${{ steps.normalise-target.outputs.collection_errors || 'false' }}" >> $GITHUB_OUTPUT + echo "no_tests_found=${{ steps.normalise-target.outputs.no_tests_found || 'false' }}" >> $GITHUB_OUTPUT + echo "has_errors=${{ steps.normalise-target.outputs.has_failures || 'false' }}" >> $GITHUB_OUTPUT + echo "error_type=none" >> $GITHUB_OUTPUT + echo "failing_count=${{ steps.normalise-target.outputs.failed || '0' }}" >> $GITHUB_OUTPUT + echo "error_count=0" >> $GITHUB_OUTPUT + echo "skipped_count=${{ steps.normalise-target.outputs.skipped || '0' }}" >> $GITHUB_OUTPUT + echo "xfailed_count=${{ steps.normalise-target.outputs.xfailed || '0' }}" >> $GITHUB_OUTPUT + echo "passing_items_json=${{ steps.normalise-target.outputs.passing_items_json || '[]' }}" >> $GITHUB_OUTPUT + fi + test-pr-branch-storybook: name: Test PR Branch Stories - needs: [lint] runs-on: ${{ fromJSON(inputs.runs_on) }} outputs: has_errors: ${{ steps.run-tests-pr.outcome == 'failure' || steps.normalise-pr.outputs.has_failures == 'true' }} - failing_stories_json: ${{ steps.normalise-pr.outputs.failing_items_json }} + failing_items_json: ${{ steps.normalise-pr.outputs.failing_items_json }} total: ${{ steps.normalise-pr.outputs.total }} passed: ${{ steps.normalise-pr.outputs.passed }} percentage: ${{ steps.normalise-pr.outputs.percentage }} collection_errors: ${{ steps.normalise-pr.outputs.collection_errors }} no_tests_found: ${{ steps.normalise-pr.outputs.no_tests_found }} + failing_count: ${{ steps.normalise-pr.outputs.failed }} + error_count: ${{ steps.normalise-pr.outputs.error_count || '0' }} + skipped_count: ${{ steps.normalise-pr.outputs.skipped }} + xfailed_count: ${{ steps.normalise-pr.outputs.xfailed }} + steps: - name: Checkout Repository (PR) uses: actions/checkout@v4.2.2 @@ -142,7 +435,7 @@ jobs: submodules: "recursive" - name: Use Node.js ${{ inputs.node-version }} - uses: actions/setup-node@v4 # Use v4 consistently + uses: actions/setup-node@v4 with: node-version: ${{ inputs.node-version }} cache: "npm" @@ -154,15 +447,15 @@ jobs: run: npx playwright install --with-deps - name: Run Storybook (PR) - run: npm run storybook -- --port ${{ inputs.storybook_port }} & + run: ${{ inputs.storybook_start_command }} -- --port ${{ inputs.storybook_port }} & - name: Wait for Storybook (PR) run: | echo "Waiting for Storybook (PR) to start on port ${{ inputs.storybook_port }}..." - timeout=120 # Increased timeout + timeout=120 counter=0 until $(curl --output /dev/null --silent --head --fail http://localhost:${{ inputs.storybook_port }}); do - if [ $counter -ge $timeout ]; then # Use -ge + if [ $counter -ge $timeout ]; then echo "Timed out waiting for Storybook (PR) to start" exit 1 fi @@ -175,19 +468,94 @@ jobs: - name: Run Storybook tests (PR) id: run-tests-pr run: | - npm run storybook-test -- --json-report pr_storybook_results.json || true - echo "Storybook tests on PR branch completed." - if [ -f pr_storybook_results.json ]; then - cat pr_storybook_results.json - else - echo "pr_storybook_results.json not found." 
- echo "{\"testResults\": [], \"numTotalTests\": 0, \"numPassedTests\": 0, \"numFailedTests\": 0}" > pr_storybook_results.json # Create empty results + set -euo pipefail + + cgroup_auto_workers() { + local n="" + if [ -f /sys/fs/cgroup/cpu.max ]; then + local quota period + quota="$(awk '{print $1}' /sys/fs/cgroup/cpu.max)" + period="$(awk '{print $2}' /sys/fs/cgroup/cpu.max)" + if [ -n "$quota" ] && [ -n "$period" ] && [ "$quota" != "max" ] && [ "$period" != "0" ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + if [ -z "$n" ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + local quota period + quota="$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)" + period="$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)" + if [ "$quota" -gt 0 ] && [ "$period" -gt 0 ]; then + n="$(awk -v q="$quota" -v p="$period" 'BEGIN{print int((q+p-1)/p)}')" + fi + fi + if [ -z "$n" ]; then + local f="" + if [ -f /sys/fs/cgroup/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset.cpus" + elif [ -f /sys/fs/cgroup/cpuset/cpuset.cpus ]; then + f="/sys/fs/cgroup/cpuset/cpuset.cpus" + fi + if [ -n "$f" ]; then + local spec + spec="$(cat "$f" | tr -d '[:space:]')" + if [ -n "$spec" ]; then + local count=0 + IFS=',' read -r -a parts <<< "$spec" + for p in "${parts[@]}"; do + if [[ "$p" == *-* ]]; then + local a="${p%%-*}" + local b="${p##*-}" + if [[ "$a" =~ ^[0-9]+$ && "$b" =~ ^[0-9]+$ && "$b" -ge "$a" ]]; then + count=$((count + b - a + 1)) + fi + elif [[ "$p" =~ ^[0-9]+$ ]]; then + count=$((count + 1)) + fi + done + if [ "$count" -gt 0 ]; then + n="$count" + fi + fi + fi + fi + if [ -z "$n" ] || [ "$n" -lt 1 ] 2>/dev/null; then + n="1" + fi + echo "$n" + } + + WORKERS="${{ inputs.parallel_workers }}" + if [ -z "$WORKERS" ]; then + if echo '${{ inputs.runs_on }}' | grep -q "multithreaded"; then + WORKERS="6" + else + WORKERS="1" + fi + elif [ "$WORKERS" = "auto" ]; then + WORKERS="$(cgroup_auto_workers)" + fi + + echo "Running Storybook tests with $WORKERS workers..." + + WORKER_FLAGS="" + if [ "$WORKERS" != "1" ]; then + WORKER_FLAGS="--maxWorkers=$WORKERS" + fi + + set +e + ${{ inputs.storybook_test_command }} -- --url http://localhost:${{ inputs.storybook_port }} $WORKER_FLAGS --json --outputFile=pr_storybook_results.json 2>&1 | tee test_output.txt + TEST_EXIT=$? + set -e + + echo "Storybook tests on PR branch completed (exit code: $TEST_EXIT)." + if [ ! -f pr_storybook_results.json ]; then + echo '{"testResults": [], "numTotalTests": 0, "numPassedTests": 0, "numFailedTests": 0}' > pr_storybook_results.json fi - name: Normalise Storybook results (PR) id: normalise-pr run: | - python "$GITHUB_WORKSPACE/.github/scripts/storybook_results_to_standard_json.py" \ + python3 "$GITHUB_WORKSPACE/.github/scripts/storybook_results_to_standard_json.py" \ pr_storybook_results.json \ pr_test_data.json \ --github-output "$GITHUB_OUTPUT" @@ -245,7 +613,7 @@ jobs: name: Check Storybook Results & Regressions runs-on: ${{ fromJSON(inputs.runs_on) }} needs: [test-pr-branch-storybook, compare-results, perform-regression-analysis] - if: always() # Always run to give a final status + if: always() steps: - name: Evaluate Storybook Test Results run: | @@ -269,17 +637,14 @@ jobs: if [[ "$HAS_REGRESSIONS" == "true" ]]; then echo "::error::${REGRESSION_COUNT} Storybook test(s) regressed. Stories that were passing on target branch ('${{ inputs.target_branch_to_compare }}') are now failing/broken on the PR branch." 
- # Consider downloading and displaying the regression artifact for details. - exit 1 # Fail the workflow due to regressions + exit 1 fi fi - # If no regression analysis, or if regression analysis passed, check PR errors directly if [[ "$PR_HAS_ERRORS" == "true" ]]; then - echo "::error::Storybook tests failed on the PR branch. Check afor artifacts." - # The original test job might have already failed, this ensures it. + echo "::error::Storybook tests failed on the PR branch. Check artifacts for details." exit 1 fi - echo "✅ Storybook tests passed and no new regressions detected (if applicable)." + echo "Storybook tests passed and no new regressions detected (when a regression comparison ran)." echo "--- End Storybook Test Results ---"
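
For completeness, the evaluation step above is a simple gate: fail the job when regressions were detected, otherwise fail when the PR branch itself had errors, otherwise pass. A sketch of the same decision in Python (the variable names mirror the shell script; reading them from the environment is illustrative only):

import os
import sys


def evaluate(has_regressions: str, regression_count: str, pr_has_errors: str) -> int:
    """Mirror of the shell gate above: a non-zero return fails the workflow."""
    if has_regressions == "true":
        print(f"::error::{regression_count} Storybook test(s) regressed on the PR branch.")
        return 1
    if pr_has_errors == "true":
        print("::error::Storybook tests failed on the PR branch. Check artifacts for details.")
        return 1
    print("Storybook tests passed and no new regressions detected.")
    return 0


if __name__ == "__main__":
    sys.exit(evaluate(os.environ.get("HAS_REGRESSIONS", "false"),
                      os.environ.get("REGRESSION_COUNT", "0"),
                      os.environ.get("PR_HAS_ERRORS", "false")))
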