diff --git a/.github/workflows/build-next.yml b/.github/workflows/build-next.yml index 3887478..2bb4da2 100644 --- a/.github/workflows/build-next.yml +++ b/.github/workflows/build-next.yml @@ -14,7 +14,7 @@ on: default: "ubuntu-latest" jobs: lint: - uses: ./.github/workflows/test-lint-py.yml + uses: ./.github/workflows/test-py-lint.yml permissions: contents: write @@ -51,6 +51,6 @@ jobs: node-version: ${{ inputs.node-version }} - run: | npm i lightningcss-linux-x64-gnu @tailwindcss/oxide-linux-x64-gnu sass-embedded-linux-x64 - npm ci + npm i npm list zod --depth=0 npm run build diff --git a/.github/workflows/gh-projects-qa.yml b/.github/workflows/gh-projects-qa.yml index 8333cfc..ac25117 100644 --- a/.github/workflows/gh-projects-qa.yml +++ b/.github/workflows/gh-projects-qa.yml @@ -2,9 +2,6 @@ name: Reusable - Set Q/A Status in Zephyrex Board on: workflow_call: - secrets: - GITHUB_TOKEN: - required: true jobs: qa-update-status: diff --git a/.github/workflows/issue-blocker-reusuable.yml b/.github/workflows/issue-blocker-reusuable.yml new file mode 100644 index 0000000..2fab33c --- /dev/null +++ b/.github/workflows/issue-blocker-reusuable.yml @@ -0,0 +1,87 @@ +# .github/workflows/issue-blocker.yml + +name: "Reusable Issue Blocker Workflow" + +on: + workflow_dispatch: + workflow_call: + +permissions: + issues: write + +jobs: + clean-blocked-comments: + runs-on: ubuntu-latest + steps: + - name: Clean up 'Blocked by' references and manage issue status + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const owner = context.repo.owner; + const repo = context.repo.repo; + const regex = /#(\d+)/g; + + const issues = await github.rest.issues.listForRepo({ owner, repo, state: 'open' }); + + for (const issue of issues.data) { + if (issue.pull_request) continue; + + console.log(`\nšŸ” Issue #${issue.number}: ${issue.title}`); + + const comments = await github.rest.issues.listComments({ owner, repo, issue_number: issue.number }); + let updated = false; + + for (const comment of comments.data) { + if (!comment.body.toLowerCase().includes('blocked by')) continue; + + console.log(`šŸ’¬ Comment ID: ${comment.id}`); + console.log(`šŸ“„ Raw: ${JSON.stringify(comment.body)}`); + + const matches = [...comment.body.matchAll(regex)].map(m => parseInt(m[1])); + const stillOpen = []; + + for (const ref of matches) { + try { + const refIssue = await github.rest.issues.get({ owner, repo, issue_number: ref }); + if (refIssue.data.state === 'open') { + console.log(`šŸ”— Found reference to issue #${ref}\nā³ Issue #${ref} is still open.`); + stillOpen.push(`#${ref}`); + } else { + console.log(`šŸ”— Found reference to issue #${ref}\nāœ… Issue #${ref} is closed.`); + } + } catch (err) { + console.warn(`āš ļø Issue #${ref} could not be fetched.`); + } + } + + const newBody = stillOpen.length > 0 + ? `Blocked by ${stillOpen.join(' ')}` + : '[Auto-removed blocked-by reference: all blocking issues closed.]'; + + if (comment.body !== newBody) { + await github.rest.issues.updateComment({ + owner, + repo, + comment_id: comment.id, + body: newBody + }); + updated = true; + console.log(`āœļø Comment updated to: "${newBody}"`); + } else { + console.log(`āœ… Comment is already correct. 
No update needed.`); + } + } + + // Manage issue status using labels + const labels = issue.labels.map(l => l.name); + const hasBlocked = comments.data.some(c => c.body.includes('Blocked by #') && !c.body.includes('Auto-removed')); + + if (hasBlocked && !labels.includes('Blocked')) { + await github.rest.issues.addLabels({ owner, repo, issue_number: issue.number, labels: ['Blocked'] }); + console.log(`šŸ·ļø Label 'Blocked' added.`); + } else if (!hasBlocked && labels.includes('Blocked')) { + await github.rest.issues.setLabels({ owner, repo, issue_number: issue.number, labels: ['To-Do'] }); + console.log(`🚫 Label 'To-Do' added.`); + } + } \ No newline at end of file diff --git a/.github/workflows/meta-regression-analysis.yml b/.github/workflows/meta-regression-analysis.yml index 9f3f66a..026f527 100644 --- a/.github/workflows/meta-regression-analysis.yml +++ b/.github/workflows/meta-regression-analysis.yml @@ -71,10 +71,6 @@ jobs: ITEM_TYPE_SINGULAR_ENV: "${{ inputs.item_type_singular }}" ITEM_TYPE_PLURAL_ENV: "${{ inputs.item_type_plural }}" run: | - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_regression_analysis.log) 2>&1 - echo "Running regression analysis..." python3 - <<'EOF' @@ -159,9 +155,11 @@ jobs: regression_items_list = sorted(list(target_passing_set.intersection(pr_failing_set))) if regression_items_list: - print(f"Found {len(regression_items_list)} regression(s)!") has_regressions_output = "true" regression_count_output = len(regression_items_list) + print(f"Found {len(regression_items_list)} regression(s)!") + print("::error::Test Regressions Found!") + print(f"::error::{len(regression_items_list)} regression(s) detected. This job was intentionally failed.") with open("regression_details.txt", "w") as f: f.write(f"Found {len(regression_items_list)} {item_type_p} that were passing/clean in the target branch but are now failing/have issues in the PR branch:\n\n") for idx, item in enumerate(regression_items_list, 1): @@ -190,12 +188,9 @@ jobs: print("::warning::GITHUB_OUTPUT environment variable not set.") EOF - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - echo "Regression analysis script completed." - # Only show regression summary in logs if regressions found + # Show regression summary in logs if regressions found if [ -f "regression_details.txt" ]; then REGRESSION_COUNT=$(grep -c "^[0-9]\+\." regression_details.txt || echo "0") if [ "$REGRESSION_COUNT" -gt 0 ]; then @@ -205,13 +200,14 @@ jobs: fi fi - - name: Upload debug logs + - name: Upload regression analysis results if: always() uses: actions/upload-artifact@v4 with: - name: regression_analysis_debug_logs_${{ inputs.pr_number || inputs.run_id }} - path: debug_regression_analysis.log - retention-days: 7 + name: regression_analysis_results_${{ inputs.pr_number || inputs.run_id }} + path: | + regression_details.txt + retention-days: 3 if-no-files-found: ignore - name: Fail job if regressions are found @@ -219,16 +215,4 @@ jobs: run: | echo "Regressions detected. Failing the job as per configuration." REGRESSION_COUNT_VAL=${{ steps.check-regressions-script.outputs.REGRESSION_COUNT }} - echo "### :x: Regressions Found!" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY # Newline - echo "**$REGRESSION_COUNT_VAL regression(s) detected.** This job was intentionally failed." >> $GITHUB_STEP_SUMMARY - echo "The 'Upload regression details artifact' step will still attempt to upload details." 
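Reviewer note: the regression detection above reduces to a set intersection between items that were passing on the target branch and items that are failing on the PR branch, plus ::error:: annotations when the intersection is non-empty. A minimal standalone sketch of that logic, assuming two hypothetical JSON list files rather than the workflow's real artifact inputs:

# regression_check_sketch.py -- illustrative only, not part of the workflow
import json

def find_regressions(target_passing, pr_failing):
    """Items that passed on the target branch but fail on the PR branch."""
    return sorted(set(target_passing) & set(pr_failing))

if __name__ == "__main__":
    # Hypothetical inputs; the workflow reads these lists from downloaded artifacts.
    with open("target_passing.json") as f:
        target_passing = json.load(f)
    with open("pr_failing.json") as f:
        pr_failing = json.load(f)

    regressions = find_regressions(target_passing, pr_failing)
    if regressions:
        # ::error:: annotations surface the failure directly in the Actions log view.
        print(f"::error::{len(regressions)} regression(s) detected. This job was intentionally failed.")
        with open("regression_details.txt", "w") as out:
            for idx, item in enumerate(regressions, 1):
                out.write(f"{idx}. {item}\n")
        raise SystemExit(1)
    print("No regressions found.")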
>> $GITHUB_STEP_SUMMARY exit 1 - - - name: Upload regression details artifact - uses: actions/upload-artifact@v4 - if: always() - with: - name: regression_details_pr_${{ inputs.pr_number || inputs.run_id }}_${{ inputs.item_type_plural }} - path: regression_details.txt - retention-days: 7 diff --git a/.github/workflows/sync-meta.yml b/.github/workflows/sync-meta.yml new file mode 100644 index 0000000..5c58ae7 --- /dev/null +++ b/.github/workflows/sync-meta.yml @@ -0,0 +1,77 @@ +# Reusable Workflow: Workflows/.github/workflows/sync-meta.yml + +name: Reusable Metadata Sync + +on: + workflow_call: + secrets: + PAT_TOKEN: + required: true + inputs: + sync_labels: + required: false + type: boolean + default: true + sync_templates: + required: false + type: boolean + default: true + sync_milestones: + required: false + type: boolean + default: true + sync_branch_rules: + required: false + type: boolean + default: true + runs_on: + required: false + type: string + default: "ubuntu-latest" + +jobs: + sync: + runs-on: ${{ inputs.runs_on }} + steps: + - name: Checkout source repo (self) + uses: actions/checkout@v4 + with: + repository: JamesonRGrieve/Workflows + token: ${{ secrets.PAT_TOKEN }} + path: source + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install dependencies + run: pip install requests PyGithub + + - name: Sync Labels + if: ${{ inputs.sync_labels }} + run: | + python3 source/scripts/sync_labels.py + env: + PAT_TOKEN: ${{ secrets.PAT_TOKEN }} + + - name: Sync Templates + if: ${{ inputs.sync_templates }} + run: | + python3 source/scripts/sync_templates.py + env: + PAT_TOKEN: ${{ secrets.PAT_TOKEN }} + + - name: Sync Milestones + if: ${{ inputs.sync_milestones }} + run: | + python3 source/scripts/sync_milestones.py + env: + PAT_TOKEN: ${{ secrets.PAT_TOKEN }} + + - name: Sync Branch Protection Rules + if: ${{ inputs.sync_branch_rules }} + run: | + python3 source/scripts/sync_branch_protection.py + env: + PAT_TOKEN: ${{ secrets.PAT_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/test-bandit.yml b/.github/workflows/test-bandit.yml new file mode 100644 index 0000000..e67951d --- /dev/null +++ b/.github/workflows/test-bandit.yml @@ -0,0 +1,130 @@ +name: Reusable Bandit Security Check with Regression Detection + +# This reusable workflow is triggered by other workflows using 'workflow_call' +on: + workflow_call: + inputs: + target_branch_to_compare: + description: "Target branch to compare against (e.g., main)" + required: true + type: string + runs_on: + required: false + type: string + default: "ubuntu-latest" + outputs: + bandit_issues_json: + description: "JSON output of Bandit issues on PR branch" + value: ${{ jobs.run-bandit.outputs.bandit_issues_json }} + +jobs: + # Job 1: Run Bandit on the PR branch + run-bandit: + name: Run Bandit on PR Branch & Extract Results + runs-on: ${{ inputs.runs_on }} + outputs: + bandit_issues_json: ${{ steps.extract-pr.outputs.BANDIT_JSON }} + steps: + # Step 1: Checkout the current pull request code + - name: Checkout PR Branch + uses: actions/checkout@v4.1.1 + with: + persist-credentials: false + + # Step 2: Set up Python 3.10 environment + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + # Step 3: Install Bandit (Python security scanner) + - name: Install Bandit + run: pip install bandit + + # Step 4: Run Bandit and output results to a file + - name: Run Bandit on PR Branch + run: | + bandit -r . 
-lll -f json -o pr_bandit_output.json || true + + # Step 5: Upload the results as a GitHub Actions artifact (for debugging or reporting) + - name: Upload PR Artifact + uses: actions/upload-artifact@v4 + with: + name: pr_bandit_output + path: pr_bandit_output.json + + # Step 6: Extract the raw issue list from the Bandit JSON output + - name: Extract PR Bandit JSON + id: extract-pr + run: | + CONTENT=$(cat pr_bandit_output.json | jq -c '.results') + echo "BANDIT_JSON=$CONTENT" >> $GITHUB_OUTPUT + + # Job 2: Run Bandit on the target branch for comparison + run-bandit-on-target: + name: Run Bandit on Target Branch + runs-on: ${{ inputs.runs_on }} + outputs: + bandit_target_json: ${{ steps.extract-target.outputs.TARGET_JSON }} + steps: + # Step 1: Checkout the base branch (e.g., main) + - name: Checkout Target Branch + uses: actions/checkout@v4 + with: + ref: ${{ inputs.target_branch_to_compare }} + persist-credentials: false + + # Step 2: Set up Python environment + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + # Step 3: Install Bandit + - name: Install Bandit + run: pip install bandit + + # Step 4: Run Bandit and save output + - name: Run Bandit on Target Branch + run: | + bandit -r . -lll -f json -o target_bandit_output.json || true + + # Step 5: Upload results from the target branch + - name: Upload Target Artifact + uses: actions/upload-artifact@v4 + with: + name: target_bandit_output + path: target_bandit_output.json + + # Step 6: Extract raw issue list from the Bandit output + - name: Extract Target Bandit JSON + id: extract-target + run: | + CONTENT=$(cat target_bandit_output.json | jq -c '.results') + echo "TARGET_JSON=$CONTENT" >> $GITHUB_OUTPUT + + # Job 3: Compare the PR results against the target to detect regressions + compare-bandit: + name: Compare Bandit Issues (Regression Analysis) + runs-on: ${{ inputs.runs_on }} + needs: [run-bandit, run-bandit-on-target] + steps: + - name: Compare JSON + run: | + echo "Comparing Bandit results between PR and target branch..." + + echo "${{ needs.run-bandit.outputs.bandit_issues_json }}" > pr.json + echo "${{ needs.run-bandit-on-target.outputs.bandit_target_json }}" > target.json + + # Compare both JSON lists to find issues present in PR but not in target + NEW_ISSUES=$(jq -n --argfile pr pr.json --argfile base target.json ' + $pr - $base | length') + + echo "New security issues introduced: $NEW_ISSUES" + + if [ "$NEW_ISSUES" -gt 0 ]; then + echo "::error::New Bandit issues introduced in PR branch." + exit 1 + else + echo "No new security issues introduced." 
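Reviewer note: the jq expression above ($pr - $base | length) treats the two Bandit results arrays as sets and counts issues present in the PR report but absent from the target report. A rough Python equivalent for reference, reading the full report files from the earlier steps instead of the extracted pr.json/target.json; like the jq version it matches whole issue objects, so a shifted line number makes an old issue look new:

# bandit_diff_sketch.py -- illustrative Python counterpart of the jq comparison
import json

def load_results(path):
    """Read the 'results' array from a Bandit JSON report."""
    with open(path) as f:
        return json.load(f).get("results", [])

def new_issues(pr_results, target_results):
    """Issues present in the PR report but not in the target report (whole-object match)."""
    seen = {json.dumps(issue, sort_keys=True) for issue in target_results}
    return [issue for issue in pr_results if json.dumps(issue, sort_keys=True) not in seen]

if __name__ == "__main__":
    introduced = new_issues(load_results("pr_bandit_output.json"),
                            load_results("target_bandit_output.json"))
    print(f"New security issues introduced: {len(introduced)}")
    if introduced:
        print("::error::New Bandit issues introduced in PR branch.")
        raise SystemExit(1)
    print("No new security issues introduced.")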
+ fi diff --git a/.github/workflows/test-py-pytest.yml b/.github/workflows/test-py-pytest.yml index 155aa61..ac7c8a1 100644 --- a/.github/workflows/test-py-pytest.yml +++ b/.github/workflows/test-py-pytest.yml @@ -38,9 +38,6 @@ on: pr_percentage: description: "Pass percentage in PR/source branch" value: ${{ jobs.test-source-branch.outputs.percentage }} - pr_failing_tests: - description: "List of failing tests in PR/source branch (JSON string)" - value: ${{ jobs.test-source-branch.outputs.failing_tests }} pr_collection_errors: description: "PR branch has collection errors" value: ${{ jobs.test-source-branch.outputs.collection_errors }} @@ -56,9 +53,6 @@ on: target_percentage: description: "Pass percentage in target branch" value: ${{ jobs.test-target-branch.outputs.percentage }} - target_passing_tests: - description: "List of passing tests in target branch (JSON string)" - value: ${{ jobs.test-target-branch.outputs.passing_tests }} has_regressions: description: "Boolean indicating if regressions were found" value: ${{ jobs.compare-results.outputs.has_regressions }} @@ -67,13 +61,7 @@ on: value: ${{ jobs.compare-results.outputs.regression_count }} jobs: - lint: - uses: ./.github/workflows/test-py-lint.yml - permissions: - contents: write # Lint job might push changes - test-source-branch: - needs: lint runs-on: ${{ inputs.runs_on }} outputs: total: ${{ steps.extract-results.outputs.total }} @@ -84,12 +72,9 @@ jobs: has_errors: ${{ steps.check-collection.outputs.has_errors }} error_type: ${{ steps.check-collection.outputs.error_type }} error_details: ${{ steps.check-collection.outputs.error_details }} - failing_tests: ${{ steps.extract-results.outputs.failing_tests }} - skipped_tests: ${{ steps.extract-results.outputs.skipped_tests }} - xfailed_tests: ${{ steps.extract-results.outputs.xfailed_tests }} - all_tests: ${{ steps.extract-results.outputs.all_tests }} - skipped_tests_with_reasons: ${{ steps.extract-results.outputs.skipped_tests_with_reasons }} - xfailed_tests_with_reasons: ${{ steps.extract-results.outputs.xfailed_tests_with_reasons }} + failing_count: ${{ steps.extract-results.outputs.failing_count }} + skipped_count: ${{ steps.extract-results.outputs.skipped_count }} + xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} steps: - name: Checkout PR Branch @@ -104,7 +89,6 @@ jobs: - name: Install dependencies run: | - set -x python -m pip install --upgrade pip pip install pytest pytest-json-report pytest-asyncio if [ -f requirements.txt ]; then @@ -114,17 +98,9 @@ jobs: - name: Check for test collection errors id: check-collection run: | - set -x - # Create verbose debug file for artifact - exec 3>&1 4>&2 - exec 1> >(tee -a debug_collection.log) 2>&1 - echo "Running pytest collection check..." 
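Reviewer note: the collection check that follows scans the pytest --collect-only output for import/syntax failures and for the no-tests case before any tests are run. A condensed sketch of that classification; the exact marker strings are assumed from the grep patterns in this step and from pytest's usual "collected 0 items" wording:

# collection_check_sketch.py -- illustrative classification of --collect-only output
import re

ERROR_MARKERS = ("ImportError", "ModuleNotFoundError", "SyntaxError",
                 "ERROR collecting", "Interrupted:")

def classify_collection(output):
    """Return (has_errors, no_tests_found, error_type) from pytest --collect-only output."""
    error_type = next((marker for marker in ERROR_MARKERS if marker in output), "")
    has_errors = bool(error_type)
    no_tests = (not has_errors
                and bool(re.search(r"collected 0 items|no tests ran", output)))
    return has_errors, no_tests, error_type

if __name__ == "__main__":
    with open("collection_output.txt") as f:
        has_errors, no_tests, error_type = classify_collection(f.read())
    # Mirrors the step outputs written to $GITHUB_OUTPUT in this step.
    print(f"has_collection_errors={str(has_errors).lower()}")
    print(f"no_tests_found={str(no_tests).lower()}")
    print(f"error_type={error_type}")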
python -m pytest --collect-only -v > collection_output.txt 2>&1 || true - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - # Set default values HAS_COLLECTION_ERRORS="false" NO_TESTS_FOUND="false" @@ -158,7 +134,6 @@ jobs: grep -A 15 "$ERROR_FILE" collection_output.txt > error_details.txt ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g') else - # If we couldn't find a specific file, get general error info grep -A 15 "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt | head -20 > error_details.txt ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g') fi @@ -193,51 +168,37 @@ jobs: echo "has_errors=false" >> $GITHUB_OUTPUT fi - # Save full collection output to debug file for artifact - echo "=== FULL COLLECTION OUTPUT ===" >> debug_collection.log - cat collection_output.txt >> debug_collection.log + # Clean summary output + if [[ "$HAS_COLLECTION_ERRORS" == "true" ]]; then + echo "āŒ Discovery Error: $ERROR_TYPE" + elif [[ "$NO_TESTS_FOUND" == "true" ]]; then + echo "āš ļø No Tests Found" + else + echo "āœ… Discovery Success" + fi - name: Run tests on PR Branch if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - set -x - # Create verbose debug file for artifact - exec 3>&1 4>&2 - exec 1> >(tee -a debug_test_run.log) 2>&1 - echo "Running tests on PR branch..." - python -m pytest -vv --json-report --json-report-file=pr_results.json || true - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 + python -m pytest -vv --json-report --json-report-file=pr_results.json --tb=short > test_output.txt 2>&1 || true if [ -f pr_results.json ]; then - echo "Test results file successfully created for PR branch" - # Save first 200 chars to debug file only - echo "=== JSON RESULTS PREVIEW ===" >> debug_test_run.log - head -c 200 pr_results.json >> debug_test_run.log + echo "āœ… Test execution completed" else - echo "::error::Failed to create test results file for PR branch" + echo "āŒ Test execution failed" fi - - name: Extract test results + - name: Extract test results and create artifacts id: extract-results run: | - set -x echo "PR_BRANCH=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_ENV - echo "Processing test results for PR branch: $PR_BRANCH" - - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_extract_results.log) 2>&1 python -c " import json import sys import os - print('Starting test results extraction script for PR branch') - # Default values in case file doesn't exist or is invalid pr_total = 0 pr_passed = 0 @@ -246,6 +207,7 @@ jobs: skipped_tests = [] xfailed_tests = [] all_tests = [] + passing_tests = [] skipped_tests_with_reasons = {} xfailed_tests_with_reasons = {} @@ -259,7 +221,7 @@ jobs: if pr_results.get('exitcode', 0) > 1: print('Detected non-zero exitcode, likely a collection error') if 'collectors' in pr_results and pr_results['collectors']: - print(f'Collection errors found: {pr_results[\"collectors\"]}') + print(f'Collection errors found: {pr_results["collectors"]}') pr_total = 0 # Explicitly set to 0 - no tests run when collection fails pr_passed = 0 elif 'summary' in pr_results and isinstance(pr_results['summary'], dict): @@ -277,7 +239,9 @@ jobs: nodeid = test.get('nodeid', '') if nodeid: all_tests.append(nodeid) # Track all tests regardless of outcome - if outcome in ['failed', 'error']: + if outcome == 'passed': + passing_tests.append(nodeid) + elif outcome in ['failed', 'error']: failing_tests.append(nodeid) 
elif outcome == 'skipped': skipped_tests.append(nodeid) @@ -307,6 +271,7 @@ jobs: xfail_reason = str(test['call']['longrepr']) xfailed_tests_with_reasons[nodeid] = xfail_reason.strip() + print(f'Found {len(passing_tests)} passing tests') print(f'Found {len(failing_tests)} failing tests') print(f'Found {len(skipped_tests)} skipped tests') print(f'Found {len(xfailed_tests)} xfailed tests') @@ -325,7 +290,7 @@ jobs: if 'pr_results' in locals(): print(f'Available keys: {list(pr_results.keys())}') if 'summary' in pr_results: - print(f'Summary structure: {pr_results[\"summary\"]}') + print(f'Summary structure: {pr_results["summary"]}') except Exception as e: print(f'Error processing results: {e}') import traceback @@ -339,108 +304,88 @@ jobs: print(f'Xfailed tests: {len(xfailed_tests)}') print(f'All discovered tests: {len(all_tests)}') - # Set outputs for GitHub Actions - print('Writing results to GITHUB_OUTPUT') + # Set scalar outputs only (no large arrays) with open(os.environ['GITHUB_OUTPUT'], 'a') as f: f.write(f'total={pr_total}\\n') f.write(f'passed={pr_passed}\\n') f.write(f'percentage={pr_percentage:.2f}\\n') - # Write test lists as compact JSON strings to avoid issues with large outputs - if failing_tests: - f.write(f'failing_tests={json.dumps(failing_tests)}\\n') - else: - f.write('failing_tests=[]\\n') - if skipped_tests: - f.write(f'skipped_tests={json.dumps(skipped_tests)}\\n') - else: - f.write('skipped_tests=[]\\n') - if xfailed_tests: - f.write(f'xfailed_tests={json.dumps(xfailed_tests)}\\n') - else: - f.write('xfailed_tests=[]\\n') - if all_tests: - f.write(f'all_tests={json.dumps(all_tests)}\\n') - else: - f.write('all_tests=[]\\n') - # Write test reason mappings - if skipped_tests_with_reasons: - f.write(f'skipped_tests_with_reasons={json.dumps(skipped_tests_with_reasons)}\\n') - else: - f.write('skipped_tests_with_reasons={}\\n') - if xfailed_tests_with_reasons: - f.write(f'xfailed_tests_with_reasons={json.dumps(xfailed_tests_with_reasons)}\\n') - else: - f.write('xfailed_tests_with_reasons={}\\n') + f.write(f'failing_count={len(failing_tests)}\\n') + f.write(f'skipped_count={len(skipped_tests)}\\n') + f.write(f'xfailed_count={len(xfailed_tests)}\\n') + # Extract warnings from test output + warnings_list = [] + try: + with open('test_output.txt', 'r') as f: + content = f.read() + # Extract warnings section + if '============================== warnings summary ===============================' in content: + warnings_section = content.split('============================== warnings summary ===============================')[1] + if '-- Docs:' in warnings_section: + warnings_section = warnings_section.split('-- Docs:')[0] + + # Parse warnings - format is file path followed by indented warning details + lines = warnings_section.split('\\n') + current_warning_group = [] + + for line in lines: + line = line.rstrip() + if not line or line.startswith('='): + continue + + # Check if this is a file path (starts at column 0, ends with .py: or contains warning count) + if not line.startswith(' ') and ('.py:' in line or 'warnings' in line): + # Save previous warning group if exists + if current_warning_group: + warnings_list.append('\\n'.join(current_warning_group)) + # Start new warning group + current_warning_group = [line] + elif line.startswith(' ') and current_warning_group: + # This is part of the current warning (indented line) + current_warning_group.append(line) + + # Don't forget the last warning group + if current_warning_group: + 
warnings_list.append('\\n'.join(current_warning_group)) + + print(f'Extracted {len(warnings_list)} warning groups from test output') + except Exception as e: + print(f'Could not extract warnings: {e}') + + # Save test lists to artifact files instead of job outputs + test_data = { + 'passing_tests': passing_tests, + 'failing_tests': failing_tests, + 'skipped_tests': skipped_tests, + 'xfailed_tests': xfailed_tests, + 'all_tests': all_tests, + 'skipped_tests_with_reasons': skipped_tests_with_reasons, + 'xfailed_tests_with_reasons': xfailed_tests_with_reasons, + 'warnings': warnings_list + } + + with open('pr_test_data.json', 'w') as f: + json.dump(test_data, f, indent=2) + + print('Test data saved to pr_test_data.json for artifact') print('Results extraction completed') " - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 + echo "āœ… Test results: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} passed (${{ steps.extract-results.outputs.percentage }}%)" - echo "PR branch test results processed: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} tests passed (${{ steps.extract-results.outputs.percentage }}%)" - - - name: Upload PR branch debug logs + - name: Upload PR branch artifacts if: always() uses: actions/upload-artifact@v4 with: - name: pr_branch_debug_logs_${{ github.event.pull_request.number || github.run_id }} + name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} path: | - debug_*.log - collection_output.txt - error_details.txt + pr_test_data.json + test_output.txt pr_results.json retention-days: 3 if-no-files-found: ignore - - name: Create PR branch test data artifact - if: always() - run: | - set -x - echo "Creating PR branch test data for regression analysis..." 
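Reviewer note: the extraction step above buckets every entry of the pytest-json-report file by outcome and persists the full lists to pr_test_data.json for the artifact, keeping only counts as step outputs. A trimmed sketch of that bucketing (skip/xfail reason capture and warning extraction omitted):

# extract_results_sketch.py -- illustrative bucketing of pytest-json-report output
import json

def bucket_outcomes(report_path):
    """Group test nodeids from a pytest-json-report file by outcome."""
    with open(report_path) as f:
        report = json.load(f)
    buckets = {"all_tests": [], "passing_tests": [], "failing_tests": [],
               "skipped_tests": [], "xfailed_tests": []}
    for test in report.get("tests", []):
        nodeid, outcome = test.get("nodeid", ""), test.get("outcome", "")
        if not nodeid:
            continue
        buckets["all_tests"].append(nodeid)
        if outcome == "passed":
            buckets["passing_tests"].append(nodeid)
        elif outcome in ("failed", "error"):
            buckets["failing_tests"].append(nodeid)
        elif outcome == "skipped":
            buckets["skipped_tests"].append(nodeid)
        elif outcome == "xfailed":
            buckets["xfailed_tests"].append(nodeid)
    return buckets

if __name__ == "__main__":
    data = bucket_outcomes("pr_results.json")
    with open("pr_test_data.json", "w") as f:
        json.dump(data, f, indent=2)
    print({key: len(value) for key, value in data.items()})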
- - # Create directory for artifact - mkdir -p pr_test_data - - # Extract failing tests from outputs and save to JSON file - python3 - << 'EOF' - import json - import os - - # Get failing tests from the step outputs (if available) - failing_tests_str = '''${{ steps.extract-results.outputs.failing_tests || '[]' }}''' - - try: - if failing_tests_str and failing_tests_str != '[]': - failing_tests = json.loads(failing_tests_str) - else: - failing_tests = [] - - print(f"Saving {len(failing_tests)} failing tests to artifact") - - # Save to JSON file for artifact - with open('pr_test_data/failing_items.json', 'w') as f: - json.dump(failing_tests, f, indent=2) - - print("PR branch test data saved successfully") - - except Exception as e: - print(f"Error saving PR branch test data: {e}") - # Create empty file as fallback - with open('pr_test_data/failing_items.json', 'w') as f: - json.dump([], f) - EOF - - - name: Upload PR branch test data artifact - uses: actions/upload-artifact@v4 - if: always() - with: - name: pr_branch_test_data_${{ github.event.pull_request.number || github.run_id }} - path: pr_test_data/ - retention-days: 1 - test-target-branch: - needs: lint runs-on: ${{ inputs.runs_on }} outputs: total: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.total || steps.extract-results.outputs.total }} @@ -451,8 +396,7 @@ jobs: has_errors: ${{ steps.check-collection.outputs.has_errors }} error_type: ${{ steps.check-collection.outputs.error_type }} error_details: ${{ steps.check-collection.outputs.error_details }} - passing_tests: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_tests || steps.extract-results.outputs.passing_tests }} - all_tests: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.all_tests || steps.extract-results.outputs.all_tests }} + passing_count: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_count || steps.extract-results.outputs.passing_count }} steps: - name: Checkout target branch @@ -468,7 +412,6 @@ jobs: - name: Install dependencies run: | - set -x python -m pip install --upgrade pip pip install pytest pytest-json-report pytest-asyncio if [ -f requirements.txt ]; then @@ -478,7 +421,6 @@ jobs: - name: Check for test collection errors id: check-collection run: | - set -x # Create verbose debug file for artifact exec 3>&1 4>&2 exec 1> >(tee -a debug_target_collection.log) 2>&1 @@ -522,7 +464,6 @@ jobs: grep -A 15 "$ERROR_FILE" collection_output.txt > error_details.txt ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g') else - # If we couldn't find a specific file, get general error info grep -A 15 "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt | head -20 > error_details.txt ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g') fi @@ -564,32 +505,20 @@ jobs: - name: Run tests on target branch if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - set -x - # Create verbose debug file for artifact - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_test_run.log) 2>&1 - echo "Running tests on target branch..." 
- python -m pytest -vv --json-report --json-report-file=target_results.json || true - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 + python -m pytest -vv --json-report --json-report-file=target_results.json --tb=short > target_test_output.txt 2>&1 || true if [ -f target_results.json ]; then - echo "Test results file successfully created for target branch" - # Save first 200 chars to debug file only - echo "=== JSON RESULTS PREVIEW ===" >> debug_target_test_run.log - head -c 200 target_results.json >> debug_target_test_run.log + echo "āœ… Test execution completed" else - echo "::warning::Failed to create test results file for target branch" + echo "āŒ Test execution failed" fi - - name: Extract test results + - name: Extract test results and create artifacts id: extract-results # Only run if there were no collection errors if: steps.check-collection.outputs.has_collection_errors != 'true' run: | - set -x echo "Processing test results for target branch: ${{ inputs.target_branch_to_compare }}" # Create debug file for detailed output @@ -608,6 +537,9 @@ jobs: target_passed = 0 target_percentage = 0 passing_tests = [] + failing_tests = [] + skipped_tests = [] + xfailed_tests = [] all_tests = [] try: @@ -620,7 +552,7 @@ jobs: if target_results.get('exitcode', 0) > 1: print('Detected non-zero exitcode, likely a collection error') if 'collectors' in target_results and target_results['collectors']: - print(f'Collection errors found: {target_results[\"collectors\"]}') + print(f'Collection errors found: {target_results["collectors"]}') target_total = 0 # Explicitly set to 0 - no tests run when collection fails target_passed = 0 elif 'summary' in target_results and isinstance(target_results['summary'], dict): @@ -630,9 +562,9 @@ jobs: target_passed = summary.get('passed', 0) print(f'Results extracted from summary - Total: {target_total}, Passed: {target_passed}') - # Extract passing tests and all tests + # Extract all test outcomes if 'tests' in target_results: - print('Extracting passing tests and all discovered tests') + print('Extracting all test outcomes from target') for test in target_results['tests']: outcome = test.get('outcome') nodeid = test.get('nodeid', '') @@ -640,8 +572,17 @@ jobs: all_tests.append(nodeid) # Track all tests regardless of outcome if outcome == 'passed': passing_tests.append(nodeid) + elif outcome in ['failed', 'error']: + failing_tests.append(nodeid) + elif outcome == 'skipped': + skipped_tests.append(nodeid) + elif outcome == 'xfailed': + xfailed_tests.append(nodeid) print(f'Found {len(passing_tests)} passing tests') + print(f'Found {len(failing_tests)} failing tests') + print(f'Found {len(skipped_tests)} skipped tests') + print(f'Found {len(xfailed_tests)} xfailed tests') print(f'Found {len(all_tests)} total discovered tests') else: print('No valid summary structure found') @@ -657,7 +598,7 @@ jobs: if 'target_results' in locals(): print(f'Available keys: {list(target_results.keys())}') if 'summary' in target_results: - print(f'Summary structure: {target_results[\"summary\"]}') + print(f'Summary structure: {target_results["summary"]}') except Exception as e: print(f'Error processing results: {e}') import traceback @@ -669,22 +610,66 @@ jobs: print(f'Passing tests: {len(passing_tests)}') print(f'All discovered tests: {len(all_tests)}') - # Set outputs for GitHub Actions - print('Writing results to GITHUB_OUTPUT') + # Set scalar outputs only (no large arrays) with open(os.environ['GITHUB_OUTPUT'], 'a') as f: f.write(f'total={target_total}\\n') 
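Reviewer note: both branches' extraction steps pull the warnings summary out of the captured pytest output by splitting on the banner line and grouping each file-path line with its indented detail lines, as the target-branch code just below does. A simplified sketch of that grouping, with the banner match reduced to a substring check:

# warnings_parse_sketch.py -- illustrative grouping of the pytest warnings summary
BANNER = "warnings summary"

def extract_warning_groups(output_path):
    """Group each warning (file-path line plus its indented detail lines) from pytest output."""
    with open(output_path) as f:
        content = f.read()
    if BANNER not in content:
        return []
    section = content.split(BANNER, 1)[1]
    if "-- Docs:" in section:
        section = section.split("-- Docs:", 1)[0]
    groups, current = [], []
    for line in section.splitlines():
        line = line.rstrip()
        if not line or line.startswith("="):
            continue
        if not line.startswith(" ") and (".py:" in line or "warnings" in line):
            if current:
                groups.append("\n".join(current))
            current = [line]       # a new group starts at a file-path line
        elif line.startswith(" ") and current:
            current.append(line)   # indented detail belongs to the current group
    if current:
        groups.append("\n".join(current))
    return groups

if __name__ == "__main__":
    for group in extract_warning_groups("test_output.txt"):
        print(group, end="\n\n")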
f.write(f'passed={target_passed}\\n') f.write(f'percentage={target_percentage:.2f}\\n') - # Write test lists as compact JSON strings to avoid issues with large outputs - if passing_tests: - f.write(f'passing_tests={json.dumps(passing_tests)}\\n') - else: - f.write('passing_tests=[]\\n') - if all_tests: - f.write(f'all_tests={json.dumps(all_tests)}\\n') - else: - f.write('all_tests=[]\\n') + f.write(f'passing_count={len(passing_tests)}\\n') + # Extract warnings from test output + warnings_list = [] + try: + with open('target_test_output.txt', 'r') as f: + content = f.read() + # Extract warnings section + if '============================== warnings summary ===============================' in content: + warnings_section = content.split('============================== warnings summary ===============================')[1] + if '-- Docs:' in warnings_section: + warnings_section = warnings_section.split('-- Docs:')[0] + + # Parse warnings - format is file path followed by indented warning details + lines = warnings_section.split('\\n') + current_warning_group = [] + + for line in lines: + line = line.rstrip() + if not line or line.startswith('='): + continue + + # Check if this is a file path (starts at column 0, ends with .py: or contains warning count) + if not line.startswith(' ') and ('.py:' in line or 'warnings' in line): + # Save previous warning group if exists + if current_warning_group: + warnings_list.append('\\n'.join(current_warning_group)) + # Start new warning group + current_warning_group = [line] + elif line.startswith(' ') and current_warning_group: + # This is part of the current warning (indented line) + current_warning_group.append(line) + + # Don't forget the last warning group + if current_warning_group: + warnings_list.append('\\n'.join(current_warning_group)) + + print(f'Extracted {len(warnings_list)} warning groups from target test output') + except Exception as e: + print(f'Could not extract warnings: {e}') + + # Save test lists to artifact files instead of job outputs + test_data = { + 'passing_tests': passing_tests, + 'failing_tests': failing_tests, + 'skipped_tests': skipped_tests, + 'xfailed_tests': xfailed_tests, + 'all_tests': all_tests, + 'warnings': warnings_list + } + + with open('target_test_data.json', 'w') as f: + json.dump(test_data, f, indent=2) + + print('Test data saved to target_test_data.json for artifact') print('Results extraction completed') " @@ -693,65 +678,18 @@ jobs: echo "Target branch test results processed: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} tests passed (${{ steps.extract-results.outputs.percentage }}%)" - - name: Upload target branch debug logs + - name: Upload target branch artifacts if: always() uses: actions/upload-artifact@v4 with: - name: target_branch_debug_logs_${{ github.event.pull_request.number || github.run_id }} + name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} path: | - debug_target_*.log - collection_output.txt - error_details.txt + target_test_data.json + target_test_output.txt target_results.json retention-days: 3 if-no-files-found: ignore - - name: Create target branch test data artifact - if: always() - run: | - set -x - echo "Creating target branch test data for regression analysis..." 
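Reviewer note: as with the PR branch, only small scalar counts are written to GITHUB_OUTPUT here while the full test lists go into target_test_data.json for upload, which avoids pushing large JSON arrays through step outputs. A minimal sketch of that split, with hypothetical test ids:

# outputs_vs_artifact_sketch.py -- scalar counts to GITHUB_OUTPUT, full lists to a file
import json
import os

def publish(passing, failing, artifact_path="target_test_data.json"):
    # Small scalar values are safe as step outputs.
    output_path = os.environ.get("GITHUB_OUTPUT")
    if output_path:
        with open(output_path, "a") as f:
            f.write(f"passing_count={len(passing)}\n")
            f.write(f"failing_count={len(failing)}\n")
    # The full lists go into a JSON file that later jobs download as an artifact.
    with open(artifact_path, "w") as f:
        json.dump({"passing_tests": passing, "failing_tests": failing}, f, indent=2)

if __name__ == "__main__":
    publish(["tests/test_example.py::test_ok"], ["tests/test_example.py::test_broken"])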
- - # Create directory for artifact - mkdir -p target_test_data - - # Extract passing tests from outputs and save to JSON file - python3 - << 'EOF' - import json - import os - - # Get passing tests from the step outputs (if available) - passing_tests_str = '''${{ steps.extract-results.outputs.passing_tests || steps.set-error-outputs.outputs.passing_tests || '[]' }}''' - - try: - if passing_tests_str and passing_tests_str != '[]': - passing_tests = json.loads(passing_tests_str) - else: - passing_tests = [] - - print(f"Saving {len(passing_tests)} passing tests to artifact") - - # Save to JSON file for artifact - with open('target_test_data/passing_items.json', 'w') as f: - json.dump(passing_tests, f, indent=2) - - print("Target branch test data saved successfully") - - except Exception as e: - print(f"Error saving target branch test data: {e}") - # Create empty file as fallback - with open('target_test_data/passing_items.json', 'w') as f: - json.dump([], f) - EOF - - - name: Upload target branch test data artifact - uses: actions/upload-artifact@v4 - if: always() - with: - name: target_branch_test_data_${{ github.event.pull_request.number || github.run_id }} - path: target_test_data/ - retention-days: 1 - # Add a step to set default outputs when collection errors are detected - name: Set collection error outputs id: set-error-outputs @@ -761,8 +699,7 @@ jobs: echo "total=0" >> $GITHUB_OUTPUT echo "passed=0" >> $GITHUB_OUTPUT echo "percentage=0.00" >> $GITHUB_OUTPUT - echo "passing_tests=[]" >> $GITHUB_OUTPUT - echo "all_tests=[]" >> $GITHUB_OUTPUT + echo "passing_count=0" >> $GITHUB_OUTPUT compare-results: needs: [test-source-branch, test-target-branch] @@ -777,103 +714,181 @@ jobs: sudo apt-get update -y sudo apt-get install -y bc + - name: Download test data artifacts + uses: actions/download-artifact@v4 + with: + pattern: "*_branch_data_${{ github.event.pull_request.number || github.run_id }}" + path: ./artifacts + merge-multiple: false + - name: Check for collection errors run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_comparison_analysis.log) 2>&1 - - echo "Retrieving collection error status information" PR_COLLECTION_ERRORS="${{ needs.test-source-branch.outputs.collection_errors }}" PR_NO_TESTS="${{ needs.test-source-branch.outputs.no_tests_found }}" PR_ERROR_TYPE="${{ needs.test-source-branch.outputs.error_type }}" - PR_ERROR_DETAILS="${{ needs.test-source-branch.outputs.error_details }}" TARGET_COLLECTION_ERRORS="${{ needs.test-target-branch.outputs.collection_errors }}" - echo "PR branch collection errors: $PR_COLLECTION_ERRORS" - echo "PR branch no tests found: $PR_NO_TESTS" - echo "PR branch error type: $PR_ERROR_TYPE" - echo "Target branch collection errors: $TARGET_COLLECTION_ERRORS" - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - # Distinct error handling for PR branch + # Clean discovery status reporting if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then - echo "::error::Test discovery errors in PR branch: $PR_ERROR_TYPE" - echo "::error::$PR_ERROR_DETAILS" - echo "āŒ PR branch has test discovery errors. Python modules could not be imported correctly." + echo "āŒ Discovery Error (PR): $PR_ERROR_TYPE" exit 1 - fi - - if [[ "$PR_NO_TESTS" == "true" ]]; then - echo "::error::No tests were found in the PR branch" - echo "āŒ PR branch has no tests detected. Please add test files that match pytest's discovery pattern." 
+ elif [[ "$PR_NO_TESTS" == "true" ]]; then + echo "āŒ No Tests Found (PR)" exit 1 + else + echo "āœ… Discovery Success (PR)" fi - # Warning for target branch issues (not a failure) if [[ "$TARGET_COLLECTION_ERRORS" == "true" ]]; then - echo "āš ļø Target branch has test discovery errors. Tests will still be compared but results may not be accurate." - fi - - if [[ "${{ needs.test-target-branch.outputs.no_tests_found }}" == "true" ]]; then - echo "āš ļø Target branch has no tests detected. PR branch tests will still be evaluated." + echo "āš ļø Discovery Error (Target)" + elif [[ "${{ needs.test-target-branch.outputs.no_tests_found }}" == "true" ]]; then + echo "āš ļø No Tests Found (Target)" + else + echo "āœ… Discovery Success (Target)" fi - # Split the regression check into separate steps for better control - # NOTE: These regression analysis steps are redundant with the perform-regression-analysis job - # but are kept for now to ensure backward compatibility. They should be removed in a future cleanup. - - name: Run regression analysis + - name: Run comprehensive regression analysis run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_regression_analysis.log) 2>&1 - - echo "Running regression analysis..." + echo "Running comprehensive regression analysis..." python3 - << 'EOF' import json import os + import glob try: - # Parse the inputs - target_passing_str = '''${{ needs.test-target-branch.outputs.passing_tests }}''' - pr_failing_str = '''${{ needs.test-source-branch.outputs.failing_tests }}''' + # Load test data from artifacts + target_data = {} + pr_data = {} + + # Load target and PR data + target_files = glob.glob('./artifacts/target_branch_data_*/target_test_data.json') + if target_files: + with open(target_files[0], 'r') as f: + target_data = json.load(f) + + pr_files = glob.glob('./artifacts/pr_branch_data_*/pr_test_data.json') + if pr_files: + with open(pr_files[0], 'r') as f: + pr_data = json.load(f) + + # Extract all test categories + target_passing = set(target_data.get('passing_tests', [])) + target_failing = set(target_data.get('failing_tests', [])) + target_skipped = set(target_data.get('skipped_tests', [])) + target_xfailed = set(target_data.get('xfailed_tests', [])) + target_warnings = set(target_data.get('warnings', [])) - # Parse JSON - target_passing = json.loads(target_passing_str) if target_passing_str and target_passing_str != '[]' else [] - pr_failing = json.loads(pr_failing_str) if pr_failing_str and pr_failing_str != '[]' else [] + pr_passing = set(pr_data.get('passing_tests', [])) + pr_failing = set(pr_data.get('failing_tests', [])) + pr_skipped = set(pr_data.get('skipped_tests', [])) + pr_xfailed = set(pr_data.get('xfailed_tests', [])) + pr_warnings = set(pr_data.get('warnings', [])) - print(f"Parsed {len(target_passing)} passing tests from target branch") - print(f"Parsed {len(pr_failing)} failing tests from PR branch") + # Debug: Print data for troubleshooting + print(f"šŸ” Debug - Target branch data:") + print(f" Passing: {len(target_passing)} tests") + print(f" Failing: {len(target_failing)} tests") + print(f" Skipped: {len(target_skipped)} tests") + print(f" XFailed: {len(target_xfailed)} tests") + print(f"šŸ” Debug - PR branch data:") + print(f" Passing: {len(pr_passing)} tests") + print(f" Failing: {len(pr_failing)} tests") + print(f" Skipped: {len(pr_skipped)} tests") + print(f" XFailed: {len(pr_xfailed)} tests") - # Find regressions using set operations - target_passing_set = set(target_passing) - pr_failing_set = 
set(pr_failing) - regression_tests = list(target_passing_set.intersection(pr_failing_set)) + # All tests in each branch (regardless of outcome) + target_all_tests = target_passing.union(target_failing, target_skipped, target_xfailed) + pr_all_tests = pr_passing.union(pr_failing, pr_skipped, pr_xfailed) - # Write results to file if there are regressions - if regression_tests: - print(f"Found {len(regression_tests)} regression(s)!") + # Analyze different regression types + pass_to_fail = list(target_passing.intersection(pr_failing)) + pass_to_skip = list(target_passing.intersection(pr_skipped.union(pr_xfailed))) + fail_to_skip = list(target_failing.intersection(pr_skipped)) + pass_to_gone = list(target_passing - pr_all_tests) # Passing tests that completely disappeared + fail_to_gone = list(target_failing - pr_all_tests) # Failing tests that completely disappeared + discovery_regressions = list(pr_warnings - target_warnings) + + # Create comprehensive regression report + has_any_regressions = bool(pass_to_fail or pass_to_skip or fail_to_skip or pass_to_gone or fail_to_gone or discovery_regressions) + + with open("comprehensive_regression_report.txt", "w") as f: + f.write("COMPREHENSIVE REGRESSION ANALYSIS\n") + f.write("=" * 50 + "\n\n") + + if pass_to_fail: + f.write(f"PASS-TO-FAIL REGRESSIONS ({len(pass_to_fail)} tests)\n") + f.write("Previously passing, now failing:\n") + for i, test in enumerate(sorted(pass_to_fail), 1): + f.write(f" {i}. {test}\n") + f.write("\n") + + if pass_to_skip: + f.write(f"PASS-TO-SKIP REGRESSIONS ({len(pass_to_skip)} tests)\n") + f.write("Previously passing, now skipped or xfailed:\n") + for i, test in enumerate(sorted(pass_to_skip), 1): + f.write(f" {i}. {test}\n") + f.write("\n") + + if fail_to_skip: + f.write(f"FAIL-TO-SKIP REGRESSIONS ({len(fail_to_skip)} tests)\n") + f.write("Previously failing, now skipped:\n") + for i, test in enumerate(sorted(fail_to_skip), 1): + f.write(f" {i}. {test}\n") + f.write("\n") + + if pass_to_gone: + f.write(f"PASS-TO-GONE REGRESSIONS ({len(pass_to_gone)} tests)\n") + f.write("Previously passing, now completely missing:\n") + for i, test in enumerate(sorted(pass_to_gone), 1): + f.write(f" {i}. {test}\n") + f.write("\n") + + if fail_to_gone: + f.write(f"FAIL-TO-GONE REGRESSIONS ({len(fail_to_gone)} tests)\n") + f.write("Previously failing, now completely missing:\n") + for i, test in enumerate(sorted(fail_to_gone), 1): + f.write(f" {i}. {test}\n") + f.write("\n") + + if discovery_regressions: + f.write(f"DISCOVERY REGRESSIONS ({len(discovery_regressions)} warnings)\n") + f.write("New warnings not present in target branch:\n") + for i, warning in enumerate(sorted(discovery_regressions), 1): + f.write(f" {i}. {warning[:200]}...\n") + f.write("\n") + if not has_any_regressions: + f.write("No regressions detected across all categories.\n") + + # Also create the simple regression file for backward compatibility + if pass_to_fail: with open("regression_details.txt", "w") as f: - f.write(f"Found {len(regression_tests)} tests that were passing in target branch but now failing in PR branch:\\n\\n") - for idx, test in enumerate(sorted(regression_tests), 1): - f.write(f"{idx}. {test}\\n") - print("Regression details written to file") + f.write(f"Found {len(pass_to_fail)} tests that were passing in target branch but now failing in PR branch:\n\n") + for idx, test in enumerate(sorted(pass_to_fail), 1): + f.write(f"{idx}. 
{test}\n") + + # Print summary + print(f"šŸ“Š Regression Analysis Results:") + print(f" Pass-to-Fail: {len(pass_to_fail)} tests") + print(f" Pass-to-Skip: {len(pass_to_skip)} tests") + print(f" Fail-to-Skip: {len(fail_to_skip)} tests") + print(f" Pass-to-Gone: {len(pass_to_gone)} tests") + print(f" Fail-to-Gone: {len(fail_to_gone)} tests") + print(f" Discovery: {len(discovery_regressions)} warnings") + + if has_any_regressions: + print(f"āŒ Total regressions detected: {len(pass_to_fail) + len(pass_to_skip) + len(fail_to_skip) + len(pass_to_gone) + len(fail_to_gone) + len(discovery_regressions)}") else: - print("No regressions found") + print("āœ… No regressions detected") + except Exception as e: print(f"Error in regression analysis: {e}") import traceback print(traceback.format_exc()) EOF - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Regression analysis completed" + echo "āœ… Comprehensive regression analysis completed" - name: Check for regression details file id: check-regressions @@ -916,94 +931,17 @@ jobs: fi - name: Upload regression details artifact - if: steps.check-regressions.outputs.HAS_REGRESSIONS == 'true' && steps.check-regressions.outputs.REGRESSION_COUNT > 0 + if: always() uses: actions/upload-artifact@v4 with: - # The artifact name from reusable-regression-analyzer.yml name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - path: regression_details.txt + path: | + regression_details.txt + comprehensive_regression_report.txt retention-days: 1 + if-no-files-found: ignore - - name: Check for skip/xfail regressions - run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_skip_xfail_analysis.log) 2>&1 - - echo "Checking for tests that were passing in target but are now skipped or xfailed in PR" - - python3 - << 'EOF' - import json - import os - - try: - # Parse the inputs - target_passing_str = '''${{ needs.test-target-branch.outputs.passing_tests }}''' - pr_skipped_str = '''${{ needs.test-source-branch.outputs.skipped_tests }}''' - pr_xfailed_str = '''${{ needs.test-source-branch.outputs.xfailed_tests }}''' - pr_skipped_with_reasons_str = '''${{ needs.test-source-branch.outputs.skipped_tests_with_reasons }}''' - pr_xfailed_with_reasons_str = '''${{ needs.test-source-branch.outputs.xfailed_tests_with_reasons }}''' - - # Parse JSON - target_passing = json.loads(target_passing_str) if target_passing_str and target_passing_str != '[]' else [] - pr_skipped = json.loads(pr_skipped_str) if pr_skipped_str and pr_skipped_str != '[]' else [] - pr_xfailed = json.loads(pr_xfailed_str) if pr_xfailed_str and pr_xfailed_str != '[]' else [] - pr_skipped_with_reasons = json.loads(pr_skipped_with_reasons_str) if pr_skipped_with_reasons_str and pr_skipped_with_reasons_str != '{}' else {} - pr_xfailed_with_reasons = json.loads(pr_xfailed_with_reasons_str) if pr_xfailed_with_reasons_str and pr_xfailed_with_reasons_str != '{}' else {} - - print(f"Parsed {len(target_passing)} passing tests from target branch") - print(f"Parsed {len(pr_skipped)} skipped tests from PR branch") - print(f"Parsed {len(pr_xfailed)} xfailed tests from PR branch") - print(f"Parsed {len(pr_skipped_with_reasons)} skipped tests with reasons") - print(f"Parsed {len(pr_xfailed_with_reasons)} xfailed tests with reasons") - - # Find tests that were passing in target but are now skipped or xfailed in PR - target_passing_set = set(target_passing) - pr_skipped_set = set(pr_skipped) - pr_xfailed_set = set(pr_xfailed) - - passing_to_skipped = 
list(target_passing_set.intersection(pr_skipped_set)) - passing_to_xfailed = list(target_passing_set.intersection(pr_xfailed_set)) - - total_skip_xfail_regressions = len(passing_to_skipped) + len(passing_to_xfailed) - - if total_skip_xfail_regressions > 0: - print(f"Found {total_skip_xfail_regressions} tests that were passing in target but are now skipped/xfailed in PR!") - - # Build comprehensive warning message - warning_parts = [f"Skip/XFail Analysis: {total_skip_xfail_regressions} test(s) that were passing in target branch are now being skipped or xfailed in PR branch."] - - if passing_to_skipped: - warning_parts.append(f"Tests now SKIPPED ({len(passing_to_skipped)}):") - for idx, test in enumerate(sorted(passing_to_skipped), 1): - reason = pr_skipped_with_reasons.get(test, 'No reason provided') - warning_parts.append(f" {idx}. {test} - Reason: {reason}") - - if passing_to_xfailed: - warning_parts.append(f"Tests now XFAILED ({len(passing_to_xfailed)}):") - for idx, test in enumerate(sorted(passing_to_xfailed), 1): - reason = pr_xfailed_with_reasons.get(test, 'No reason provided') - warning_parts.append(f" {idx}. {test} - Reason: {reason}") - - warning_parts.append("While these changes don't fail the workflow, they indicate tests that were working before are now being bypassed. Please review these tests to ensure this is intentional.") - - # Print as single warning annotation - combined_warning = " ".join(warning_parts) - print(f"::warning::{combined_warning}") - else: - print("No skip/xfail regressions found - all previously passing tests are still running.") - except Exception as e: - print(f"Error in skip/xfail analysis: {e}") - import traceback - print(traceback.format_exc()) - EOF - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Skip/xfail regression analysis completed" - - - name: Check for test additions and removals + - name: Check for test additions and removals from artifacts run: | # Create analysis debug file exec 3>&1 4>&2 @@ -1014,15 +952,27 @@ jobs: python3 - << 'EOF' import json import os + import glob try: - # Parse the inputs - target_all_str = '''${{ needs.test-target-branch.outputs.all_tests }}''' - pr_all_str = '''${{ needs.test-source-branch.outputs.all_tests }}''' + # Load test data from artifacts + target_data = {} + pr_data = {} - # Parse JSON - target_all = json.loads(target_all_str) if target_all_str and target_all_str != '[]' else [] - pr_all = json.loads(pr_all_str) if pr_all_str and pr_all_str != '[]' else [] + # Load target and PR data + target_files = glob.glob('./artifacts/target_branch_data_*/target_test_data.json') + if target_files: + with open(target_files[0], 'r') as f: + target_data = json.load(f) + + pr_files = glob.glob('./artifacts/pr_branch_data_*/pr_test_data.json') + if pr_files: + with open(pr_files[0], 'r') as f: + pr_data = json.load(f) + + # Extract test arrays + target_all = target_data.get('all_tests', []) + pr_all = pr_data.get('all_tests', []) print(f"Parsed {len(target_all)} total tests from target branch") print(f"Parsed {len(pr_all)} total tests from PR branch") @@ -1034,45 +984,15 @@ jobs: removed_tests = list(target_all_set - pr_all_set) # In target but not in PR added_tests = list(pr_all_set - target_all_set) # In PR but not in target - # Report removed tests (warnings) - if removed_tests: - print(f"Found {len(removed_tests)} tests that were removed from target branch!") - - # Build comprehensive removal warning - removal_parts = [f"Test Removal Analysis: {len(removed_tests)} test(s) that existed in 
target branch are missing from PR branch."] - removal_parts.append(f"REMOVED Tests ({len(removed_tests)}):") - for idx, test in enumerate(sorted(removed_tests), 1): - removal_parts.append(f" {idx}. {test}") - removal_parts.append("These test removals should be reviewed to ensure they are intentional. If tests were renamed or moved, this may show as removal + addition.") - - # Print as single warning annotation - combined_removal_warning = " ".join(removal_parts) - print(f"::warning::{combined_removal_warning}") + # Report test changes + if removed_tests and added_tests: + print(f"šŸ“Š Test Changes: +{len(added_tests)} added, -{len(removed_tests)} removed") + elif added_tests: + print(f"āœ… Test Additions: {len(added_tests)} new test(s) added") + elif removed_tests: + print(f"āš ļø Test Removals: {len(removed_tests)} test(s) removed") else: - print("No test removals detected.") - - # Report added tests (notifications/info) - if added_tests: - print(f"Found {len(added_tests)} new tests added in PR branch!") - - # Build comprehensive addition notice - addition_parts = [f"Test Addition Analysis: {len(added_tests)} new test(s) have been added in the PR branch."] - addition_parts.append(f"NEW Tests ({len(added_tests)}):") - for idx, test in enumerate(sorted(added_tests), 1): - addition_parts.append(f" {idx}. {test}") - addition_parts.append("New tests detected - this indicates expanded test coverage!") - - # Print as single notice annotation - combined_addition_notice = " ".join(addition_parts) - print(f"::notice::{combined_addition_notice}") - else: - print("No new tests detected in PR branch.") - - # Summary - if not removed_tests and not added_tests: - print("Test suite composition is unchanged between target and PR branches.") - else: - print(f"Test suite changes: {len(added_tests)} added, {len(removed_tests)} removed") + print("āœ… Test suite unchanged") except Exception as e: print(f"Error in test addition/removal analysis: {e}") @@ -1087,9 +1007,8 @@ jobs: - name: Compare test results run: | - echo "Test Results Summary:" - echo "Target branch (${{ inputs.target_branch_to_compare }}): ${{ needs.test-target-branch.outputs.passed }}/${{ needs.test-target-branch.outputs.total }} tests passed (${{ needs.test-target-branch.outputs.percentage }}%)" - echo "PR branch: ${{ needs.test-source-branch.outputs.passed }}/${{ needs.test-source-branch.outputs.total }} tests passed (${{ needs.test-source-branch.outputs.percentage }}%)" + echo "Target: ${{ needs.test-target-branch.outputs.passed }}/${{ needs.test-target-branch.outputs.total }} passed (${{ needs.test-target-branch.outputs.percentage }}%)" + echo "PR: ${{ needs.test-source-branch.outputs.passed }}/${{ needs.test-source-branch.outputs.total }} passed (${{ needs.test-source-branch.outputs.percentage }}%)" if [[ "${{ needs.test-source-branch.outputs.total }}" == "0" ]]; then echo "::error::No tests were found in the PR branch" @@ -1116,23 +1035,82 @@ jobs: fi fi - # Fail if any tests passed in target branch but now fail in PR branch - if [[ "${{ needs.perform-regression-analysis.outputs.has_regressions }}" == "true" ]]; then - echo "āŒ PR branch has test regressions from target branch" + # Check for regressions from meta-regression-analysis OR our comprehensive analysis + COMPREHENSIVE_REGRESSIONS="false" + if [ -f comprehensive_regression_report.txt ]; then + # Check if there are any actual regressions in our comprehensive report + if grep -q "REGRESSIONS.*([1-9]" comprehensive_regression_report.txt; then + COMPREHENSIVE_REGRESSIONS="true" + 
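Reviewer note: the summary step below recovers each category count from comprehensive_regression_report.txt with chained grep -o calls per category. The same headers can be read in one pass with a single regex, sketched here under the assumption that the header format stays "NAME REGRESSIONS (N ...)":

# report_counts_sketch.py -- illustrative single-pass count extraction
import re

HEADER = re.compile(r"^([A-Z-]+) REGRESSIONS \((\d+)", re.MULTILINE)

def category_counts(report_path="comprehensive_regression_report.txt"):
    """Map each category header (e.g. PASS-TO-FAIL) to the count in its parentheses."""
    with open(report_path) as f:
        text = f.read()
    return {name: int(count) for name, count in HEADER.findall(text)}

if __name__ == "__main__":
    counts = category_counts()
    print(counts)
    print(f"Total regressions: {sum(counts.values())}")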
fi + fi + + if [[ "${{ needs.perform-regression-analysis.outputs.has_regressions }}" == "true" ]] || [[ "$COMPREHENSIVE_REGRESSIONS" == "true" ]]; then + echo "āŒ Test regressions detected from target branch" REGRESSION_COUNT_VAL=${{ needs.perform-regression-analysis.outputs.regression_count }} - echo " - $REGRESSION_COUNT_VAL tests that were passing in target branch are now failing" echo "### :x: Test Regressions Detected!" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY - echo "**$REGRESSION_COUNT_VAL test(s) that were passing in the target branch are now failing in the PR branch.**" >> $GITHUB_STEP_SUMMARY - echo "This job (\`compare-results\`) has been marked as failed due to these regressions." >> $GITHUB_STEP_SUMMARY + + # Extract counts from comprehensive report if available + if [ -f comprehensive_regression_report.txt ]; then + PASS_FAIL_COUNT=$(grep -o "PASS-TO-FAIL REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + PASS_SKIP_COUNT=$(grep -o "PASS-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + FAIL_SKIP_COUNT=$(grep -o "FAIL-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + PASS_GONE_COUNT=$(grep -o "PASS-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + FAIL_GONE_COUNT=$(grep -o "FAIL-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + DISCOVERY_COUNT=$(grep -o "DISCOVERY REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + + TOTAL_REGRESSIONS=$((PASS_FAIL_COUNT + PASS_SKIP_COUNT + FAIL_SKIP_COUNT + PASS_GONE_COUNT + FAIL_GONE_COUNT + DISCOVERY_COUNT)) + + echo "**$TOTAL_REGRESSIONS total regression(s) detected across multiple categories:**" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Category | Count |" >> $GITHUB_STEP_SUMMARY + echo "|----------|-------|" >> $GITHUB_STEP_SUMMARY + echo "| Pass → Fail | $PASS_FAIL_COUNT |" >> $GITHUB_STEP_SUMMARY + echo "| Pass → Skip/XFail | $PASS_SKIP_COUNT |" >> $GITHUB_STEP_SUMMARY + echo "| Fail → Skip | $FAIL_SKIP_COUNT |" >> $GITHUB_STEP_SUMMARY + echo "| Pass → Gone | $PASS_GONE_COUNT |" >> $GITHUB_STEP_SUMMARY + echo "| Fail → Gone | $FAIL_GONE_COUNT |" >> $GITHUB_STEP_SUMMARY + echo "| Discovery Warnings | $DISCOVERY_COUNT |" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + else + echo "**$REGRESSION_COUNT_VAL test regression(s) detected.** See detailed breakdown below:" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + fi - if [ -f regression_details.txt ]; then # Check if file exists (it should from previous step) - echo "Failing tests (regressions) list:" - cat regression_details.txt + # Display comprehensive regression report if available + if [ -f comprehensive_regression_report.txt ]; then + echo "šŸ“‹ **Comprehensive Regression Analysis:**" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + # Parse and format the comprehensive report for better GitHub display + while IFS= read -r line; do + if [[ "$line" =~ ^[A-Z-].*REGRESSIONS.*\([0-9]+ ]]; then + echo "### $line" >> $GITHUB_STEP_SUMMARY + elif [[ "$line" =~ ^Previously ]]; then + echo "*$line*" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + elif [[ "$line" =~ ^[[:space:]]*[0-9]+\. ]]; then + echo "- ${line#*. }" >> $GITHUB_STEP_SUMMARY + elif [[ ! 
"$line" =~ ^=.*=$ ]] && [[ -n "$line" ]]; then + echo "$line" >> $GITHUB_STEP_SUMMARY + fi + done < comprehensive_regression_report.txt + + echo "" >> $GITHUB_STEP_SUMMARY + elif [ -f regression_details.txt ]; then + echo "### Pass-to-Fail Regressions" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + grep "^[0-9]\+\." regression_details.txt | while read -r line; do + echo "- ${line#*. }" >> $GITHUB_STEP_SUMMARY + done + echo "" >> $GITHUB_STEP_SUMMARY else - echo "::warning::Regression details file (regression_details.txt) not found in Compare test results step. It might be available as an artifact from the 'perform-regression-analysis' job." + echo "::warning::Regression details files not found." fi + + echo "" >> $GITHUB_STEP_SUMMARY + echo "This job (\`compare-results\`) has been marked as failed due to these regressions." >> $GITHUB_STEP_SUMMARY exit 1 fi @@ -1178,15 +1156,6 @@ jobs: exit 1 fi - - name: Upload comparison analysis logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: comparison_analysis_logs_${{ github.event.pull_request.number || github.run_id }} - path: debug_*_analysis.log - retention-days: 3 - if-no-files-found: ignore - perform-regression-analysis: needs: [test-source-branch, test-target-branch] uses: ./.github/workflows/meta-regression-analysis.yml @@ -1195,17 +1164,14 @@ jobs: item_type_plural: "tests" pr_number: ${{ github.event.pull_request.number }} run_id: ${{ github.run_id }} - target_branch_artifact_name: target_branch_test_data_${{ github.event.pull_request.number || github.run_id }} - pr_branch_artifact_name: pr_branch_test_data_${{ github.event.pull_request.number || github.run_id }} - # Secrets are not needed for this reusable workflow currently - # secrets: inherit + target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} + pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} # Conditionally run notification job only if needed prepare-notification: name: Prepare Notification Data needs: [ - lint, test-source-branch, test-target-branch, compare-results, @@ -1236,19 +1202,37 @@ jobs: echo "::notice::DISCORD_WEBHOOK_URL secret is not set. Discord notifications will likely be skipped by the alert workflow if it relies on this secret." echo "webhook_available=false" >> $GITHUB_OUTPUT else - echo "::debug::DISCORD_WEBHOOK_URL secret is present." echo "webhook_available=true" >> $GITHUB_OUTPUT fi - name: Download regression details (if any) id: download_regressions - if: needs.perform-regression-analysis.outputs.has_regressions == 'true' && needs.perform-regression-analysis.outputs.regression_count > 0 + if: always() uses: actions/download-artifact@v4 with: - # The artifact name from reusable-regression-analyzer.yml name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests path: . # Download to current directory continue-on-error: true + - name: Check downloaded regression file + if: always() + run: | + echo "Checking for regression details file..." 
+ if [ -f "regression_details.txt" ]; then + echo "āœ… Regression details file found" + echo "File size: $(wc -c < regression_details.txt) bytes" + echo "First few lines:" + head -5 regression_details.txt + else + echo "āŒ Regression details file not found" + fi + + if [ -f "comprehensive_regression_report.txt" ]; then + echo "āœ… Comprehensive regression report found" + echo "File size: $(wc -c < comprehensive_regression_report.txt) bytes" + else + echo "āŒ Comprehensive regression report not found" + fi + - name: Construct Discord Notification id: construct_notification env: @@ -1426,8 +1410,8 @@ jobs: if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then MESSAGE_LINES+=("**:red_circle: ERROR: Test Discovery Failed in PR Branch (\`${PR_BRANCH}\`)**") MESSAGE_LINES+=(" - Type: \`${PR_ERROR_TYPE}\`") - MESSAGE_LINES+=(" - Details: \`\`\`${PR_ERROR_DETAILS_TRUNCATED}\`\`\`") MESSAGE_LINES+=(" - This usually indicates import errors or syntax issues preventing tests from being collected.") + MESSAGE_LINES+=(" - See attached file for detailed error information.") elif [[ "$PR_NO_TESTS_FOUND" == "true" ]]; then MESSAGE_LINES+=("**:warning: WARNING: No Tests Found in PR Branch (\`${PR_BRANCH}\`)**") MESSAGE_LINES+=(" - Pytest did not discover any test files matching its patterns.") @@ -1437,38 +1421,90 @@ jobs: # Regression Analysis Summary if [[ "$HAS_REGRESSIONS" == "true" ]]; then MESSAGE_LINES+=("**:red_circle: REGRESSIONS DETECTED**") - MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.") - # Calculate current message length - CURRENT_MESSAGE=$(printf "%s\\n" "${MESSAGE_LINES[@]}") - CURRENT_LENGTH=${#CURRENT_MESSAGE} - - if [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then - # Read regression details - REGRESSION_LIST=$(awk '/^[0-9]+\./ {sub(/^[0-9]+\. /, "- "); print}' "$REGRESSION_FILE_PATH") + # Check if we have comprehensive regression file with categories + if [ -f "comprehensive_regression_report.txt" ]; then + # Extract counts from comprehensive report + PASS_FAIL_COUNT=$(grep -o "PASS-TO-FAIL REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + PASS_SKIP_COUNT=$(grep -o "PASS-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + FAIL_SKIP_COUNT=$(grep -o "FAIL-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + PASS_GONE_COUNT=$(grep -o "PASS-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + FAIL_GONE_COUNT=$(grep -o "FAIL-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") + DISCOVERY_COUNT=$(grep -o "DISCOVERY REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0") - # Calculate length with regression details - TEMP_MESSAGE="$CURRENT_MESSAGE" - TEMP_MESSAGE+="\`\`\`" - TEMP_MESSAGE+="$REGRESSION_LIST" - TEMP_MESSAGE+="\`\`\`" - TEMP_LENGTH=${#TEMP_MESSAGE} + # Add category summaries (≤5 show paths, >5 show count + refer to file) + if [[ "$PASS_FAIL_COUNT" -gt 0 ]]; then + if [[ "$PASS_FAIL_COUNT" -le 5 ]]; then + MESSAGE_LINES+=("**Pass→Fail ($PASS_FAIL_COUNT):**") + readarray -t test_paths < <(grep -A 100 "PASS-TO-FAIL REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_FAIL_COUNT | sed 's/^ [0-9]\+\. 
//') + for test_path in "${test_paths[@]}"; do + MESSAGE_LINES+=("• \`$test_path\`") + done + else + MESSAGE_LINES+=("**Pass→Fail:** $PASS_FAIL_COUNT tests (see attached file)") + fi + fi + + if [[ "$PASS_SKIP_COUNT" -gt 0 ]]; then + if [[ "$PASS_SKIP_COUNT" -le 5 ]]; then + MESSAGE_LINES+=("**Pass→Skip ($PASS_SKIP_COUNT):**") + readarray -t test_paths < <(grep -A 100 "PASS-TO-SKIP REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_SKIP_COUNT | sed 's/^ [0-9]\+\. //') + for test_path in "${test_paths[@]}"; do + MESSAGE_LINES+=("• \`$test_path\`") + done + else + MESSAGE_LINES+=("**Pass→Skip:** $PASS_SKIP_COUNT tests (see attached file)") + fi + fi + + if [[ "$FAIL_SKIP_COUNT" -gt 0 ]]; then + if [[ "$FAIL_SKIP_COUNT" -le 5 ]]; then + MESSAGE_LINES+=("**Fail→Skip ($FAIL_SKIP_COUNT):**") + readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_COUNT | sed 's/^ [0-9]\+\. //') + for test_path in "${test_paths[@]}"; do + MESSAGE_LINES+=("• \`$test_path\`") + done + else + MESSAGE_LINES+=("**Fail→Skip:** $FAIL_SKIP_COUNT tests (see attached file)") + fi + fi + + if [[ "$PASS_GONE_COUNT" -gt 0 ]]; then + if [[ "$PASS_GONE_COUNT" -le 5 ]]; then + MESSAGE_LINES+=("**Pass→Gone ($PASS_GONE_COUNT):**") + readarray -t test_paths < <(grep -A 100 "PASS-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_GONE_COUNT | sed 's/^ [0-9]\+\. //') + for test_path in "${test_paths[@]}"; do + MESSAGE_LINES+=("• \`$test_path\`") + done + else + MESSAGE_LINES+=("**Pass→Gone:** $PASS_GONE_COUNT tests (see attached file)") + fi + fi + + if [[ "$FAIL_GONE_COUNT" -gt 0 ]]; then + if [[ "$FAIL_GONE_COUNT" -le 5 ]]; then + MESSAGE_LINES+=("**Fail→Gone ($FAIL_GONE_COUNT):**") + readarray -t test_paths < <(grep -A 100 "FAIL-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_GONE_COUNT | sed 's/^ [0-9]\+\. 
//') + for test_path in "${test_paths[@]}"; do + MESSAGE_LINES+=("• \`$test_path\`") + done + else + MESSAGE_LINES+=("**Fail→Gone:** $FAIL_GONE_COUNT tests (see attached file)") + fi + fi - if [ $TEMP_LENGTH -le 2000 ]; then - # If total length would be under 2000 chars, include in message - MESSAGE_LINES+=(" - **Failed Tests (Regressions):**") - MESSAGE_LINES+=("\`\`\`") - MESSAGE_LINES+=("$REGRESSION_LIST") - MESSAGE_LINES+=("\`\`\`") - ARTIFACT_PATH_OUTPUT="" # No artifact if details are inline - else - # If would exceed 2000 chars, attach file instead - MESSAGE_LINES+=(" - Details for the ${REGRESSION_COUNT} regressions are in the attached \`regression_details.txt\` file.") - ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH" + if [[ "$DISCOVERY_COUNT" -gt 0 ]]; then + if [[ "$DISCOVERY_COUNT" -le 5 ]]; then + MESSAGE_LINES+=("**Discovery Warnings ($DISCOVERY_COUNT):**") + MESSAGE_LINES+=("• $DISCOVERY_COUNT new warnings (see attached file)") + else + MESSAGE_LINES+=("**Discovery Warnings:** $DISCOVERY_COUNT warnings (see attached file)") + fi fi + else - MESSAGE_LINES+=(" (Regression details file not found or download failed; cannot list specific regressions here.)") - ARTIFACT_PATH_OUTPUT="" + # Fallback to simple regression count + MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.") fi elif [[ "$COMPARE_RESULT" == "failure" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then # This case handles general comparison failures NOT due to specific regressions @@ -1485,6 +1521,15 @@ jobs: MESSAGE_LINES+=("---") MESSAGE_LINES+=("[View Workflow Run](${ACTION_RUN_URL})") + # Set artifact path - always prefer comprehensive report if it exists + if [ -f "comprehensive_regression_report.txt" ]; then + ARTIFACT_PATH_OUTPUT="comprehensive_regression_report.txt" + elif [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then + ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH" + else + ARTIFACT_PATH_OUTPUT="" + fi + # Construct with actual newlines FINAL_MESSAGE_BODY=$(printf "%s\\n" "${MESSAGE_LINES[@]}") if [ ${#MESSAGE_LINES[@]} -gt 0 ]; then @@ -1500,17 +1545,6 @@ jobs: echo "artifact_path_out=$ARTIFACT_PATH_OUTPUT" >> $GITHUB_OUTPUT - - name: Upload notification construction debug logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: notification_debug_logs_${{ github.event.pull_request.number || github.run_id }} - path: debug_notification_construction.log - retention-days: 3 - if-no-files-found: ignore - - # Even if webhook checks are handled inside the alert workflow, - # we still need to pass the secret to satisfy GitHub's workflow validation notify-discord: name: Send Discord Notification needs: [prepare-notification] diff --git a/.github/workflows/test-pytest.yml b/.github/workflows/test-pytest.yml deleted file mode 100644 index 0a9bad5..0000000 --- a/.github/workflows/test-pytest.yml +++ /dev/null @@ -1,1401 +0,0 @@ -name: Reusable Compare Pytest Results - -on: - workflow_call: - inputs: - target_branch_to_compare: - description: "The target branch to compare against (e.g., main, refs/heads/main)." - required: true - type: string - python-version: - description: "Python version to use for testing." - required: false - type: string - default: "3.10" - ping_latest_committer: - description: "If true, the latest committer on the PR will be added to the ping list." 
- required: false - type: boolean - default: false - runs_on: - required: false - type: string - default: "ubuntu-latest" - secrets: - DISCORD_WEBHOOK_URL: - description: "Discord Webhook URL for failure notifications. If not provided, notifications are skipped." - required: false - DISCORD_USER_MAP: - description: 'JSON string mapping GitHub usernames to Discord User IDs (e.g., {"user1":"id1"}). If not provided, users won''t be pinged.' - required: false - outputs: - pr_total: - description: "Total tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.total }} - pr_passed: - description: "Passed tests in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.passed }} - pr_percentage: - description: "Pass percentage in PR/source branch" - value: ${{ jobs.test-source-branch.outputs.percentage }} - pr_collection_errors: - description: "PR branch has collection errors" - value: ${{ jobs.test-source-branch.outputs.collection_errors }} - pr_no_tests_found: - description: "PR branch has no tests found" - value: ${{ jobs.test-source-branch.outputs.no_tests_found }} - target_total: - description: "Total tests in target branch" - value: ${{ jobs.test-target-branch.outputs.total }} - target_passed: - description: "Passed tests in target branch" - value: ${{ jobs.test-target-branch.outputs.passed }} - target_percentage: - description: "Pass percentage in target branch" - value: ${{ jobs.test-target-branch.outputs.percentage }} - has_regressions: - description: "Boolean indicating if regressions were found" - value: ${{ jobs.compare-results.outputs.has_regressions }} - regression_count: - description: "Number of test regressions found" - value: ${{ jobs.compare-results.outputs.regression_count }} - -jobs: - lint: - uses: ./.github/workflows/test-lint-py.yml - permissions: - contents: write # Lint job might push changes - - test-source-branch: - needs: lint - runs-on: ${{ inputs.runs_on }} - outputs: - total: ${{ steps.extract-results.outputs.total }} - passed: ${{ steps.extract-results.outputs.passed }} - percentage: ${{ steps.extract-results.outputs.percentage }} - collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }} - no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} - has_errors: ${{ steps.check-collection.outputs.has_errors }} - error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} - failing_count: ${{ steps.extract-results.outputs.failing_count }} - skipped_count: ${{ steps.extract-results.outputs.skipped_count }} - xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }} - - steps: - - name: Checkout PR Branch - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - - - name: Set up Python - uses: actions/setup-python@v5.3.0 - with: - python-version: "${{ inputs.python-version }}" - - - name: Install dependencies - run: | - python -m pip install --upgrade pip --quiet - pip install pytest pytest-json-report pytest-asyncio --quiet - if [ -f requirements.txt ]; then - pip install -r requirements.txt --quiet - fi - - - name: Check for test collection errors - id: check-collection - run: | - echo "Running pytest collection check..." 
- - # Redirect ALL output to files, including subprocess stderr - python -m pytest --collect-only -v > collection_output.txt 2>&1 || true - - # Save to debug file for artifact - echo "=== COLLECTION CHECK OUTPUT ===" >> debug_collection.log - cat collection_output.txt >> debug_collection.log - - # Set default values - HAS_COLLECTION_ERRORS="false" - NO_TESTS_FOUND="false" - ERROR_TYPE="none" - ERROR_DETAILS="none" - - if grep -q "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt; then - echo "::error::Test discovery errors detected in PR branch - Python modules could not be imported correctly" - - # Attempt to identify specific error type - if grep -q "ImportError" collection_output.txt; then - ERROR_TYPE="ImportError" - elif grep -q "ModuleNotFoundError" collection_output.txt; then - ERROR_TYPE="ModuleNotFoundError" - elif grep -q "SyntaxError" collection_output.txt; then - ERROR_TYPE="SyntaxError" - elif grep -q "ERROR collecting" collection_output.txt; then - ERROR_TYPE="CollectionError" - elif grep -q "Interrupted:" collection_output.txt; then - ERROR_TYPE="Interrupted" - else - ERROR_TYPE="UnknownError" - fi - - echo "PR branch discovery error type: $ERROR_TYPE" - - ERROR_FILE=$(grep -o "ERROR collecting.*\.py" collection_output.txt | grep -o "[a-zA-Z0-9_/]*\.py" || echo "Unknown file") - - if [[ "$ERROR_FILE" != "Unknown file" ]]; then - echo "Error in file $ERROR_FILE" - grep -A 15 "$ERROR_FILE" collection_output.txt > error_details.txt - # Truncate to 200 chars for GitHub output, full details go to artifact - ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g' | cut -c1-200) - else - grep -A 15 "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt | head -20 > error_details.txt - # Truncate to 200 chars for GitHub output, full details go to artifact - ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g' | cut -c1-200) - fi - - echo "::error::PR branch discovery error details (truncated): ${ERROR_DETAILS}..." - HAS_COLLECTION_ERRORS="true" - else - echo "No discovery errors detected in PR branch" - - TEST_COUNT=$(grep -o "collected [0-9]* item" collection_output.txt | grep -o "[0-9]*" || echo "0") - - if [[ "$TEST_COUNT" == "0" ]]; then - echo "::warning::No tests were found in the PR branch" - NO_TESTS_FOUND="true" - ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="No test files discovered matching pytest pattern" - else - echo "Found $TEST_COUNT tests in PR branch" - fi - fi - - # Set all the outputs with size limits - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - - # For backward compatibility - if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - - name: Run tests on PR Branch - if: steps.check-collection.outputs.has_collection_errors != 'true' - env: - PYTHONUNBUFFERED: "1" - PYTHONIOENCODING: "utf-8" - PYTHONWARNINGS: "ignore" - MIGRATION_MANAGER_LOG_LEVEL: "ERROR" - MIGRATION_MANAGER_QUIET: "true" - SQLALCHEMY_WARN_20: "false" - SQLALCHEMY_SILENCE_UBER_WARNING: "1" - PYTEST_DISABLE_PLUGIN_AUTOLOAD: "1" - PYTEST_CURRENT_TEST: "" - run: | - echo "Running tests on PR branch..." 
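A compact Python sketch of the classification the 'Check for test collection errors' step performs, assuming the same marker strings and error types as the shell above. The real step writes `has_collection_errors`, `no_tests_found`, `error_type`, and a 200-character `error_details` excerpt to `$GITHUB_OUTPUT`; this sketch just returns them.

```python
"""Sketch: capture `pytest --collect-only` output, classify known discovery failures,
and flag the no-tests case, mirroring the shell step above."""
import re
import subprocess

# Marker substrings and the error type the workflow reports for each.
ERROR_MARKERS = [
    ("ImportError", "ImportError"),
    ("ModuleNotFoundError", "ModuleNotFoundError"),
    ("SyntaxError", "SyntaxError"),
    ("ERROR collecting", "CollectionError"),
    ("Interrupted:", "Interrupted"),
]

def check_collection() -> dict:
    # `|| true` in the shell version: never fail this step on pytest's exit code.
    proc = subprocess.run(["python", "-m", "pytest", "--collect-only", "-v"],
                          capture_output=True, text=True)
    output = proc.stdout + proc.stderr

    for marker, error_type in ERROR_MARKERS:
        if marker in output:
            details = " ".join(output.split())[:200]  # short excerpt for job outputs
            return {"has_collection_errors": True, "no_tests_found": False,
                    "error_type": error_type, "error_details": details}

    # No discovery errors: count what pytest collected ("collected N item(s)").
    match = re.search(r"collected (\d+) item", output)
    count = int(match.group(1)) if match else 0
    return {"has_collection_errors": False, "no_tests_found": count == 0,
            "error_type": "NoTestsFound" if count == 0 else "none",
            "error_details": "No test files discovered matching pytest pattern" if count == 0 else "none",
            "collected": count}

if __name__ == "__main__":
    print(check_collection())
```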
- - # Run pytest with aggressive output suppression - redirect everything to log file - exec 3>&1 4>&2 - exec 1>/dev/null 2>/dev/null - - python -m pytest --json-report --json-report-file=pr_results.json --tb=no -q --disable-warnings --log-level=CRITICAL --log-cli-level=CRITICAL 2>&1 | tee test_run_output.log >/dev/null || true - - # Restore output for final messages - exec 1>&3 2>&4 - - # Save output to debug file for artifact - echo "=== PYTEST RUN OUTPUT ===" >> debug_test_run.log - if [ -f test_run_output.log ]; then - cat test_run_output.log >> debug_test_run.log - fi - - if [ -f pr_results.json ]; then - echo "Test results file created successfully" - else - echo "::error::Failed to create test results file for PR branch" - fi - - - name: Extract test results and create artifacts - id: extract-results - run: | - echo "PR_BRANCH=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_ENV - echo "Processing test results for PR branch: $PR_BRANCH" - - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_extract_results.log) 2>&1 - - python -c " - import json - import sys - import os - - # Default values in case file doesn't exist or is invalid - pr_total = 0 - pr_passed = 0 - pr_percentage = 0 - failing_tests = [] - skipped_tests = [] - xfailed_tests = [] - all_tests = [] - skipped_tests_with_reasons = {} - xfailed_tests_with_reasons = {} - - try: - with open('pr_results.json') as f: - pr_results = json.load(f) - - # Check for collection errors by looking at exitcode or error patterns - if pr_results.get('exitcode', 0) > 1: - pr_total = 0 # Explicitly set to 0 - no tests run when collection fails - pr_passed = 0 - elif 'summary' in pr_results and isinstance(pr_results['summary'], dict): - # Normal case - extract data from summary - summary = pr_results['summary'] - pr_total = summary.get('total', 0) - pr_passed = summary.get('passed', 0) - - # Extract all tests by outcome and collect all test nodeids with reasons - if 'tests' in pr_results: - for test in pr_results['tests']: - outcome = test.get('outcome') - nodeid = test.get('nodeid', '') - if nodeid: - all_tests.append(nodeid) # Track all tests regardless of outcome - if outcome in ['failed', 'error']: - failing_tests.append(nodeid) - elif outcome == 'skipped': - skipped_tests.append(nodeid) - # Extract skip reason - skip_reason = 'No reason provided' - if 'longrepr' in test and test['longrepr']: - # longrepr can be a string or list, handle both - longrepr = test['longrepr'] - if isinstance(longrepr, list) and longrepr: - skip_reason = str(longrepr[0]) if longrepr[0] else 'No reason provided' - elif isinstance(longrepr, str): - skip_reason = longrepr - elif 'call' in test and test['call'] and 'longrepr' in test['call']: - skip_reason = str(test['call']['longrepr']) - skipped_tests_with_reasons[nodeid] = skip_reason.strip() - elif outcome == 'xfailed': - xfailed_tests.append(nodeid) - # Extract xfail reason - xfail_reason = 'No reason provided' - if 'longrepr' in test and test['longrepr']: - longrepr = test['longrepr'] - if isinstance(longrepr, list) and longrepr: - xfail_reason = str(longrepr[0]) if longrepr[0] else 'No reason provided' - elif isinstance(longrepr, str): - xfail_reason = longrepr - elif 'call' in test and test['call'] and 'longrepr' in test['call']: - xfail_reason = str(test['call']['longrepr']) - xfailed_tests_with_reasons[nodeid] = xfail_reason.strip() - - # Calculate percentage safely - pr_percentage = (pr_passed / pr_total * 100) if pr_total > 0 else 0 - - except Exception as e: - pass # Use 
defaults on any error - - # Set scalar outputs only (no large arrays) - with open(os.environ['GITHUB_OUTPUT'], 'a') as f: - f.write(f'total={pr_total}\\n') - f.write(f'passed={pr_passed}\\n') - f.write(f'percentage={pr_percentage:.2f}\\n') - f.write(f'failing_count={len(failing_tests)}\\n') - f.write(f'skipped_count={len(skipped_tests)}\\n') - f.write(f'xfailed_count={len(xfailed_tests)}\\n') - - # Save test lists to artifact files instead of job outputs - test_data = { - 'failing_tests': failing_tests, - 'skipped_tests': skipped_tests, - 'xfailed_tests': xfailed_tests, - 'all_tests': all_tests, - 'skipped_tests_with_reasons': skipped_tests_with_reasons, - 'xfailed_tests_with_reasons': xfailed_tests_with_reasons - } - - with open('pr_test_data.json', 'w') as f: - json.dump(test_data, f, indent=2) - " - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "PR branch test results processed" - - - name: Upload PR branch artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: | - pr_test_data.json - debug_*.log - test_run_output.log - collection_output.txt - error_details.txt - pr_results.json - retention-days: 3 - if-no-files-found: ignore - - test-target-branch: - needs: lint - runs-on: ${{ inputs.runs_on }} - outputs: - total: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.total || steps.extract-results.outputs.total }} - passed: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passed || steps.extract-results.outputs.passed }} - percentage: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.percentage || steps.extract-results.outputs.percentage }} - collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }} - no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }} - has_errors: ${{ steps.check-collection.outputs.has_errors }} - error_type: ${{ steps.check-collection.outputs.error_type }} - error_details: ${{ steps.check-collection.outputs.error_details }} - passing_count: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_count || steps.extract-results.outputs.passing_count }} - - steps: - - name: Checkout target branch - uses: actions/checkout@v4.2.2 - with: - submodules: "recursive" - ref: ${{ inputs.target_branch_to_compare }} - - - name: Set up Python - uses: actions/setup-python@v5.3.0 - with: - python-version: "${{ inputs.python-version }}" - - - name: Install dependencies - run: | - python -m pip install --upgrade pip --quiet - pip install pytest pytest-json-report pytest-asyncio --quiet - if [ -f requirements.txt ]; then - pip install -r requirements.txt --quiet - fi - - - name: Check for test collection errors - id: check-collection - run: | - echo "Running pytest collection check..." 
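The 'Extract test results and create artifacts' steps above boil down to parsing the pytest-json-report output. A condensed Python sketch, keyed to the PR-branch file names, with the `test['call']['longrepr']` fallback and most error handling omitted:

```python
"""Sketch: read the pytest-json-report file, bucket node IDs by outcome, keep skip/xfail
reasons from 'longrepr', and save the lists for the compare job."""
import json

def extract(results_path: str = "pr_results.json", data_path: str = "pr_test_data.json") -> dict:
    with open(results_path, encoding="utf-8") as fh:
        results = json.load(fh)

    data = {"failing_tests": [], "skipped_tests": [], "xfailed_tests": [], "all_tests": [],
            "skipped_tests_with_reasons": {}, "xfailed_tests_with_reasons": {}}
    total = passed = 0

    # A collection failure reports exitcode > 1; in that case no tests ran and totals stay 0.
    if results.get("exitcode", 0) <= 1 and isinstance(results.get("summary"), dict):
        total = results["summary"].get("total", 0)
        passed = results["summary"].get("passed", 0)
        for test in results.get("tests", []):
            nodeid, outcome = test.get("nodeid", ""), test.get("outcome")
            if not nodeid:
                continue
            data["all_tests"].append(nodeid)
            if outcome in ("failed", "error"):
                data["failing_tests"].append(nodeid)
            elif outcome in ("skipped", "xfailed"):
                longrepr = test.get("longrepr") or ""
                if isinstance(longrepr, list):
                    longrepr = longrepr[0] if longrepr else ""
                reason = str(longrepr).strip() or "No reason provided"
                data[f"{outcome}_tests"].append(nodeid)
                data[f"{outcome}_tests_with_reasons"][nodeid] = reason

    with open(data_path, "w", encoding="utf-8") as fh:
        json.dump(data, fh, indent=2)
    return {"total": total, "passed": passed,
            "percentage": round(passed / total * 100, 2) if total else 0.0}

if __name__ == "__main__":
    print(extract())
```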
- - # Redirect ALL output to files, including subprocess stderr - python -m pytest --collect-only -v > collection_output.txt 2>&1 || true - - # Save to debug file for artifact - echo "=== COLLECTION CHECK OUTPUT ===" >> debug_target_collection.log - cat collection_output.txt >> debug_target_collection.log - - # Set default values - HAS_COLLECTION_ERRORS="false" - NO_TESTS_FOUND="false" - ERROR_TYPE="none" - ERROR_DETAILS="none" - - if grep -q "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt; then - echo "::warning::Test discovery errors detected in target branch - Python modules could not be imported correctly" - - # Attempt to identify specific error type - if grep -q "ImportError" collection_output.txt; then - ERROR_TYPE="ImportError" - elif grep -q "ModuleNotFoundError" collection_output.txt; then - ERROR_TYPE="ModuleNotFoundError" - elif grep -q "SyntaxError" collection_output.txt; then - ERROR_TYPE="SyntaxError" - elif grep -q "ERROR collecting" collection_output.txt; then - ERROR_TYPE="CollectionError" - elif grep -q "Interrupted:" collection_output.txt; then - ERROR_TYPE="Interrupted" - else - ERROR_TYPE="UnknownError" - fi - - echo "Target branch discovery error type: $ERROR_TYPE" - - ERROR_FILE=$(grep -o "ERROR collecting.*\.py" collection_output.txt | grep -o "[a-zA-Z0-9_/]*\.py" || echo "Unknown file") - - if [[ "$ERROR_FILE" != "Unknown file" ]]; then - echo "Error in file $ERROR_FILE" - grep -A 15 "$ERROR_FILE" collection_output.txt > error_details.txt - # Truncate to 200 chars for GitHub output, full details go to artifact - ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g' | cut -c1-200) - else - grep -A 15 "ImportError\|ModuleNotFoundError\|SyntaxError\|ERROR collecting\|Interrupted:" collection_output.txt | head -20 > error_details.txt - # Truncate to 200 chars for GitHub output, full details go to artifact - ERROR_DETAILS=$(cat error_details.txt | tr '\n' ' ' | sed 's/"/\\"/g' | cut -c1-200) - fi - - echo "::warning::Target branch discovery error details (truncated): ${ERROR_DETAILS}..." - HAS_COLLECTION_ERRORS="true" - else - echo "No discovery errors detected in target branch" - - TEST_COUNT=$(grep -o "collected [0-9]* item" collection_output.txt | grep -o "[0-9]*" || echo "0") - - if [[ "$TEST_COUNT" == "0" ]]; then - echo "::warning::No tests were found in the target branch" - NO_TESTS_FOUND="true" - ERROR_TYPE="NoTestsFound" - ERROR_DETAILS="No test files discovered matching pytest pattern" - else - echo "Found $TEST_COUNT tests in target branch" - fi - fi - - # Set all the outputs with size limits - echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT - echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT - echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT - echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT - - # For backward compatibility - if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then - echo "has_errors=true" >> $GITHUB_OUTPUT - else - echo "has_errors=false" >> $GITHUB_OUTPUT - fi - - - name: Run tests on target branch - if: steps.check-collection.outputs.has_collection_errors != 'true' - env: - PYTHONUNBUFFERED: "1" - PYTHONIOENCODING: "utf-8" - PYTHONWARNINGS: "ignore" - MIGRATION_MANAGER_LOG_LEVEL: "ERROR" - MIGRATION_MANAGER_QUIET: "true" - SQLALCHEMY_WARN_20: "false" - SQLALCHEMY_SILENCE_UBER_WARNING: "1" - PYTEST_DISABLE_PLUGIN_AUTOLOAD: "1" - PYTEST_CURRENT_TEST: "" - run: | - echo "Running tests on target branch..." 
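The 'Run tests on ...' steps on both branches use the same invocation. A minimal sketch of that call from Python, with the flags copied from the workflow and the `subprocess` wrapper standing in for the shell's exec redirections:

```python
"""Sketch: invoke pytest with pytest-json-report, send all console output to a log file,
and only check that the JSON report was produced."""
import pathlib
import subprocess

def run_branch_tests(report_file: str = "pr_results.json",
                     log_file: str = "test_run_output.log") -> bool:
    cmd = [
        "python", "-m", "pytest",
        "--json-report", f"--json-report-file={report_file}",
        "--tb=no", "-q", "--disable-warnings",
        "--log-level=CRITICAL", "--log-cli-level=CRITICAL",
    ]
    # Keep the console clean but keep a log for the artifact upload.
    with open(log_file, "w", encoding="utf-8") as log:
        subprocess.run(cmd, stdout=log, stderr=subprocess.STDOUT, check=False)
    return pathlib.Path(report_file).is_file()

if __name__ == "__main__":
    ok = run_branch_tests()
    print("Test results file created successfully" if ok
          else "Failed to create test results file")
```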
- - # Run pytest with aggressive output suppression - redirect everything to log file - exec 3>&1 4>&2 - exec 1>/dev/null 2>/dev/null - - python -m pytest --json-report --json-report-file=target_results.json --tb=no -q --disable-warnings --log-level=CRITICAL --log-cli-level=CRITICAL 2>&1 | tee target_test_run_output.log >/dev/null || true - - # Restore output for final messages - exec 1>&3 2>&4 - - # Save output to debug file for artifact - echo "=== PYTEST RUN OUTPUT ===" >> debug_target_test_run.log - if [ -f target_test_run_output.log ]; then - cat target_test_run_output.log >> debug_target_test_run.log - fi - - if [ -f target_results.json ]; then - echo "Test results file created successfully" - else - echo "::warning::Failed to create test results file for target branch" - fi - - - name: Extract test results and create artifacts - id: extract-results - # Only run if there were no collection errors - if: steps.check-collection.outputs.has_collection_errors != 'true' - run: | - echo "Processing test results for target branch: ${{ inputs.target_branch_to_compare }}" - - # Create debug file for detailed output - exec 3>&1 4>&2 - exec 1> >(tee -a debug_target_extract_results.log) 2>&1 - - python -c " - import json - import sys - import os - - # Default values in case file doesn't exist or is invalid - target_total = 0 - target_passed = 0 - target_percentage = 0 - passing_tests = [] - all_tests = [] - - try: - with open('target_results.json') as f: - target_results = json.load(f) - - # Check for collection errors by looking at exitcode or error patterns - if target_results.get('exitcode', 0) > 1: - target_total = 0 # Explicitly set to 0 - no tests run when collection fails - target_passed = 0 - elif 'summary' in target_results and isinstance(target_results['summary'], dict): - # Normal case - extract data from summary - summary = target_results['summary'] - target_total = summary.get('total', 0) - target_passed = summary.get('passed', 0) - - # Extract passing tests and all tests - if 'tests' in target_results: - for test in target_results['tests']: - outcome = test.get('outcome') - nodeid = test.get('nodeid', '') - if nodeid: - all_tests.append(nodeid) # Track all tests regardless of outcome - if outcome == 'passed': - passing_tests.append(nodeid) - - # Calculate percentage safely - target_percentage = (target_passed / target_total * 100) if target_total > 0 else 0 - - except Exception as e: - pass # Use defaults on any error - - # Set scalar outputs only (no large arrays) - with open(os.environ['GITHUB_OUTPUT'], 'a') as f: - f.write(f'total={target_total}\\n') - f.write(f'passed={target_passed}\\n') - f.write(f'percentage={target_percentage:.2f}\\n') - f.write(f'passing_count={len(passing_tests)}\\n') - - # Save test lists to artifact files instead of job outputs - test_data = { - 'passing_tests': passing_tests, - 'all_tests': all_tests - } - - with open('target_test_data.json', 'w') as f: - json.dump(test_data, f, indent=2) - " - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Target branch test results processed" - - - name: Upload target branch artifacts - if: always() - uses: actions/upload-artifact@v4 - with: - name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - path: | - target_test_data.json - debug_target_*.log - target_test_run_output.log - collection_output.txt - error_details.txt - target_results.json - retention-days: 3 - if-no-files-found: ignore - - # Add a step to set default outputs when collection errors are detected 
- - name: Set collection error outputs - id: set-error-outputs - if: steps.check-collection.outputs.has_collection_errors == 'true' - run: | - echo "::warning::Setting default outputs for target branch due to collection errors" - echo "total=0" >> $GITHUB_OUTPUT - echo "passed=0" >> $GITHUB_OUTPUT - echo "percentage=0.00" >> $GITHUB_OUTPUT - echo "passing_count=0" >> $GITHUB_OUTPUT - - compare-results: - needs: [test-source-branch, test-target-branch] - runs-on: ${{ inputs.runs_on }} - outputs: - has_regressions: ${{ needs.perform-regression-analysis.outputs.has_regressions }} - regression_count: ${{ needs.perform-regression-analysis.outputs.regression_count }} - - steps: - - name: Install bc - run: | - sudo apt-get update -y - sudo apt-get install -y bc - - - name: Download test data artifacts - uses: actions/download-artifact@v4 - with: - pattern: "*_branch_data_${{ github.event.pull_request.number || github.run_id }}" - path: ./artifacts - merge-multiple: false - - - name: Check for collection errors - run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_comparison_analysis.log) 2>&1 - - echo "Retrieving collection error status information" - PR_COLLECTION_ERRORS="${{ needs.test-source-branch.outputs.collection_errors }}" - PR_NO_TESTS="${{ needs.test-source-branch.outputs.no_tests_found }}" - PR_ERROR_TYPE="${{ needs.test-source-branch.outputs.error_type }}" - PR_ERROR_DETAILS="${{ needs.test-source-branch.outputs.error_details }}" - TARGET_COLLECTION_ERRORS="${{ needs.test-target-branch.outputs.collection_errors }}" - - echo "PR branch collection errors: $PR_COLLECTION_ERRORS" - echo "PR branch no tests found: $PR_NO_TESTS" - echo "PR branch error type: $PR_ERROR_TYPE" - echo "Target branch collection errors: $TARGET_COLLECTION_ERRORS" - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - # Distinct error handling for PR branch - if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then - echo "::error::Test discovery errors in PR branch: $PR_ERROR_TYPE" - echo "::error::$PR_ERROR_DETAILS" - echo "āŒ PR branch has test discovery errors. Python modules could not be imported correctly." - exit 1 - fi - - if [[ "$PR_NO_TESTS" == "true" ]]; then - echo "::error::No tests were found in the PR branch" - echo "āŒ PR branch has no tests detected. Please add test files that match pytest's discovery pattern." - exit 1 - fi - - # Warning for target branch issues (not a failure) - if [[ "$TARGET_COLLECTION_ERRORS" == "true" ]]; then - echo "āš ļø Target branch has test discovery errors. Tests will still be compared but results may not be accurate." - fi - - if [[ "${{ needs.test-target-branch.outputs.no_tests_found }}" == "true" ]]; then - echo "āš ļø Target branch has no tests detected. PR branch tests will still be evaluated." - fi - - - name: Run regression analysis from artifacts - run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_regression_analysis.log) 2>&1 - - echo "Running regression analysis from artifact data..." 
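This and the two following analysis steps share one pattern: glob the downloaded per-branch artifact JSON and compare sets of node IDs. A condensed sketch of that pattern, with paths and file names taken from the upload/download steps and the skip/xfail and addition/removal reporting reduced to counts:

```python
"""Sketch: load the per-branch test-data JSON from the downloaded artifacts and derive
pass->fail regressions, skip/xfail regressions, and suite additions/removals with set arithmetic."""
import glob
import json

def load_first(pattern: str) -> dict:
    """Return the first matching artifact JSON, or an empty dict if none was downloaded."""
    files = glob.glob(pattern)
    if not files:
        return {}
    with open(files[0], encoding="utf-8") as fh:
        return json.load(fh)

target = load_first("./artifacts/target_branch_data_*/target_test_data.json")
pr = load_first("./artifacts/pr_branch_data_*/pr_test_data.json")

target_passing = set(target.get("passing_tests", []))
target_all = set(target.get("all_tests", []))
pr_all = set(pr.get("all_tests", []))

# Pass -> fail regressions: previously passing node IDs that now fail or error.
regressions = sorted(target_passing & set(pr.get("failing_tests", [])))
# Pass -> skip/xfail: previously passing node IDs that are now bypassed.
now_skipped = sorted(target_passing & set(pr.get("skipped_tests", [])))
now_xfailed = sorted(target_passing & set(pr.get("xfailed_tests", [])))
# Suite composition changes.
removed, added = sorted(target_all - pr_all), sorted(pr_all - target_all)

if regressions:
    with open("regression_details.txt", "w", encoding="utf-8") as fh:
        fh.write(f"Found {len(regressions)} tests that were passing in target branch "
                 f"but now failing in PR branch:\n\n")
        fh.writelines(f"{idx}. {test}\n" for idx, test in enumerate(regressions, 1))

print(f"regressions={len(regressions)} skipped={len(now_skipped)} "
      f"xfailed={len(now_xfailed)} added={len(added)} removed={len(removed)}")
```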
- - python3 - << 'EOF' - import json - import os - - try: - # Load test data from artifacts - target_data = {} - pr_data = {} - - # Try to load target branch data - target_artifact_path = './artifacts/target_branch_data_*/target_test_data.json' - import glob - target_files = glob.glob(target_artifact_path) - if target_files: - with open(target_files[0], 'r') as f: - target_data = json.load(f) - print(f"Loaded target data from {target_files[0]}") - else: - print("No target branch test data found") - - # Try to load PR branch data - pr_artifact_path = './artifacts/pr_branch_data_*/pr_test_data.json' - pr_files = glob.glob(pr_artifact_path) - if pr_files: - with open(pr_files[0], 'r') as f: - pr_data = json.load(f) - print(f"Loaded PR data from {pr_files[0]}") - else: - print("No PR branch test data found") - - # Extract test arrays - target_passing = target_data.get('passing_tests', []) - pr_failing = pr_data.get('failing_tests', []) - - print(f"Parsed {len(target_passing)} passing tests from target branch") - print(f"Parsed {len(pr_failing)} failing tests from PR branch") - - # Find regressions using set operations - target_passing_set = set(target_passing) - pr_failing_set = set(pr_failing) - regression_tests = list(target_passing_set.intersection(pr_failing_set)) - - # Write results to file if there are regressions - if regression_tests: - print(f"Found {len(regression_tests)} regression(s)!") - - with open("regression_details.txt", "w") as f: - f.write(f"Found {len(regression_tests)} tests that were passing in target branch but now failing in PR branch:\\n\\n") - for idx, test in enumerate(sorted(regression_tests), 1): - f.write(f"{idx}. {test}\\n") - print("Regression details written to file") - else: - print("No regressions found") - except Exception as e: - print(f"Error in regression analysis: {e}") - import traceback - print(traceback.format_exc()) - EOF - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Regression analysis completed" - - - name: Check for regression details file - id: check-regressions - run: | - _has_regressions="false" - _regression_count="0" - - if [ -f "regression_details.txt" ]; then - echo "Regression details file exists" - # Count regression lines (lines starting with a number and period) - _current_count=$(grep -c "^[0-9]\+\." regression_details.txt || echo "0") - echo "Found $_current_count regression items in file" - - if [ "$_current_count" -gt 0 ]; then - _has_regressions="true" - _regression_count="$_current_count" - echo "::error::Test Regressions Found: $_regression_count test(s) that were passing in target branch are now **failing** in PR branch." - echo "Regression details:" - cat regression_details.txt - else - # File exists but no regressions counted (e.g. empty or malformed) - _has_regressions="false" - _regression_count="0" - fi - else - echo "No regression details file found - no regressions detected" - _has_regressions="false" - _regression_count="0" - fi - - echo "HAS_REGRESSIONS=$_has_regressions" >> $GITHUB_OUTPUT - echo "REGRESSION_COUNT=$_regression_count" >> $GITHUB_OUTPUT - - if [[ "$_has_regressions" == "false" ]]; then - if [ -f regression_details.txt ] && [ "$_has_regressions" == "false" ]; then - echo "::notice::Regression details file (regression_details.txt) was found but no valid regression entries were counted by this step, or the file was empty." - else - echo "No test regressions detected by this step." 
- fi - fi - - - name: Upload regression details artifact - if: steps.check-regressions.outputs.HAS_REGRESSIONS == 'true' && steps.check-regressions.outputs.REGRESSION_COUNT > 0 - uses: actions/upload-artifact@v4 - with: - name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - path: regression_details.txt - retention-days: 1 - - - name: Check for skip/xfail regressions from artifacts - run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_skip_xfail_analysis.log) 2>&1 - - echo "Checking for tests that were passing in target but are now skipped or xfailed in PR" - - python3 - << 'EOF' - import json - import os - import glob - - try: - # Load test data from artifacts - target_data = {} - pr_data = {} - - # Load target and PR data - target_files = glob.glob('./artifacts/target_branch_data_*/target_test_data.json') - if target_files: - with open(target_files[0], 'r') as f: - target_data = json.load(f) - - pr_files = glob.glob('./artifacts/pr_branch_data_*/pr_test_data.json') - if pr_files: - with open(pr_files[0], 'r') as f: - pr_data = json.load(f) - - # Extract test arrays - target_passing = target_data.get('passing_tests', []) - pr_skipped = pr_data.get('skipped_tests', []) - pr_xfailed = pr_data.get('xfailed_tests', []) - pr_skipped_with_reasons = pr_data.get('skipped_tests_with_reasons', {}) - pr_xfailed_with_reasons = pr_data.get('xfailed_tests_with_reasons', {}) - - print(f"Parsed {len(target_passing)} passing tests from target branch") - print(f"Parsed {len(pr_skipped)} skipped tests from PR branch") - print(f"Parsed {len(pr_xfailed)} xfailed tests from PR branch") - print(f"Parsed {len(pr_skipped_with_reasons)} skipped tests with reasons") - print(f"Parsed {len(pr_xfailed_with_reasons)} xfailed tests with reasons") - - # Find tests that were passing in target but are now skipped or xfailed in PR - target_passing_set = set(target_passing) - pr_skipped_set = set(pr_skipped) - pr_xfailed_set = set(pr_xfailed) - - passing_to_skipped = list(target_passing_set.intersection(pr_skipped_set)) - passing_to_xfailed = list(target_passing_set.intersection(pr_xfailed_set)) - - total_skip_xfail_regressions = len(passing_to_skipped) + len(passing_to_xfailed) - - if total_skip_xfail_regressions > 0: - print(f"Found {total_skip_xfail_regressions} tests that were passing in target but are now skipped/xfailed in PR!") - - # Build comprehensive warning message - warning_parts = [f"Skip/XFail Analysis: {total_skip_xfail_regressions} test(s) that were passing in target branch are now being skipped or xfailed in PR branch."] - - if passing_to_skipped: - warning_parts.append(f"Tests now SKIPPED ({len(passing_to_skipped)}):") - for idx, test in enumerate(sorted(passing_to_skipped), 1): - reason = pr_skipped_with_reasons.get(test, 'No reason provided') - warning_parts.append(f" {idx}. {test} - Reason: {reason}") - - if passing_to_xfailed: - warning_parts.append(f"Tests now XFAILED ({len(passing_to_xfailed)}):") - for idx, test in enumerate(sorted(passing_to_xfailed), 1): - reason = pr_xfailed_with_reasons.get(test, 'No reason provided') - warning_parts.append(f" {idx}. {test} - Reason: {reason}") - - warning_parts.append("While these changes don't fail the workflow, they indicate tests that were working before are now being bypassed. 
Please review these tests to ensure this is intentional.") - - # Print as single warning annotation - combined_warning = " ".join(warning_parts) - print(f"::warning::{combined_warning}") - else: - print("No skip/xfail regressions found - all previously passing tests are still running.") - except Exception as e: - print(f"Error in skip/xfail analysis: {e}") - import traceback - print(traceback.format_exc()) - EOF - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Skip/xfail regression analysis completed" - - - name: Check for test additions and removals from artifacts - run: | - # Create analysis debug file - exec 3>&1 4>&2 - exec 1> >(tee -a debug_test_changes_analysis.log) 2>&1 - - echo "Checking for test additions and removals between target and PR branches" - - python3 - << 'EOF' - import json - import os - import glob - - try: - # Load test data from artifacts - target_data = {} - pr_data = {} - - # Load target and PR data - target_files = glob.glob('./artifacts/target_branch_data_*/target_test_data.json') - if target_files: - with open(target_files[0], 'r') as f: - target_data = json.load(f) - - pr_files = glob.glob('./artifacts/pr_branch_data_*/pr_test_data.json') - if pr_files: - with open(pr_files[0], 'r') as f: - pr_data = json.load(f) - - # Extract test arrays - target_all = target_data.get('all_tests', []) - pr_all = pr_data.get('all_tests', []) - - print(f"Parsed {len(target_all)} total tests from target branch") - print(f"Parsed {len(pr_all)} total tests from PR branch") - - # Find test additions and removals using set operations - target_all_set = set(target_all) - pr_all_set = set(pr_all) - - removed_tests = list(target_all_set - pr_all_set) # In target but not in PR - added_tests = list(pr_all_set - target_all_set) # In PR but not in target - - # Report removed tests (warnings) - if removed_tests: - print(f"Found {len(removed_tests)} tests that were removed from target branch!") - - # Build comprehensive removal warning - removal_parts = [f"Test Removal Analysis: {len(removed_tests)} test(s) that existed in target branch are missing from PR branch."] - removal_parts.append(f"REMOVED Tests ({len(removed_tests)}):") - for idx, test in enumerate(sorted(removed_tests), 1): - removal_parts.append(f" {idx}. {test}") - removal_parts.append("These test removals should be reviewed to ensure they are intentional. If tests were renamed or moved, this may show as removal + addition.") - - # Print as single warning annotation - combined_removal_warning = " ".join(removal_parts) - print(f"::warning::{combined_removal_warning}") - else: - print("No test removals detected.") - - # Report added tests (notifications/info) - if added_tests: - print(f"Found {len(added_tests)} new tests added in PR branch!") - - # Build comprehensive addition notice - addition_parts = [f"Test Addition Analysis: {len(added_tests)} new test(s) have been added in the PR branch."] - addition_parts.append(f"NEW Tests ({len(added_tests)}):") - for idx, test in enumerate(sorted(added_tests), 1): - addition_parts.append(f" {idx}. 
{test}") - addition_parts.append("New tests detected - this indicates expanded test coverage!") - - # Print as single notice annotation - combined_addition_notice = " ".join(addition_parts) - print(f"::notice::{combined_addition_notice}") - else: - print("No new tests detected in PR branch.") - - # Summary - if not removed_tests and not added_tests: - print("Test suite composition is unchanged between target and PR branches.") - else: - print(f"Test suite changes: {len(added_tests)} added, {len(removed_tests)} removed") - - except Exception as e: - print(f"Error in test addition/removal analysis: {e}") - import traceback - print(traceback.format_exc()) - EOF - - # Restore stdout/stderr for GitHub Actions - exec 1>&3 2>&4 - - echo "Test addition/removal analysis completed" - - - name: Compare test results - run: | - echo "Test Results Summary:" - echo "Target branch (${{ inputs.target_branch_to_compare }}): ${{ needs.test-target-branch.outputs.passed }}/${{ needs.test-target-branch.outputs.total }} tests passed (${{ needs.test-target-branch.outputs.percentage }}%)" - echo "PR branch: ${{ needs.test-source-branch.outputs.passed }}/${{ needs.test-source-branch.outputs.total }} tests passed (${{ needs.test-source-branch.outputs.percentage }}%)" - - if [[ "${{ needs.test-source-branch.outputs.total }}" == "0" ]]; then - echo "::error::No tests were found in the PR branch" - echo "āŒ PR branch has no tests detected. Please add test files that match pytest's discovery pattern." - exit 1 - fi - - PR_PASSED=${{ needs.test-source-branch.outputs.passed }} - TARGET_PASSED=${{ needs.test-target-branch.outputs.passed }} - PR_PERCENTAGE=${{ needs.test-source-branch.outputs.percentage }} - TARGET_PERCENTAGE=${{ needs.test-target-branch.outputs.percentage }} - PR_TOTAL=${{ needs.test-source-branch.outputs.total }} - TARGET_TOTAL=${{ needs.test-target-branch.outputs.total }} - - # Handle case where target has no tests - if [[ "$TARGET_TOTAL" == "0" ]]; then - if [[ "$PR_PASSED" -gt 0 ]]; then - echo "āœ… PR branch has tests and some are passing (target branch has no tests)" - exit 0 - else - echo "āŒ PR branch has no passing tests" - echo " - Pass percentage: $PR_PERCENTAGE%" - exit 1 - fi - fi - - # Fail if any tests passed in target branch but now fail in PR branch - if [[ "${{ needs.perform-regression-analysis.outputs.has_regressions }}" == "true" ]]; then - echo "āŒ PR branch has test regressions from target branch" - REGRESSION_COUNT_VAL=${{ needs.perform-regression-analysis.outputs.regression_count }} - echo " - $REGRESSION_COUNT_VAL tests that were passing in target branch are now failing" - - echo "### :x: Test Regressions Detected!" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**$REGRESSION_COUNT_VAL test(s) that were passing in the target branch are now failing in the PR branch.**" >> $GITHUB_STEP_SUMMARY - echo "This job (\`compare-results\`) has been marked as failed due to these regressions." >> $GITHUB_STEP_SUMMARY - - if [ -f regression_details.txt ]; then # Check if file exists (it should from previous step) - echo "Failing tests (regressions) list:" - cat regression_details.txt - else - echo "::warning::Regression details file (regression_details.txt) not found in Compare test results step. It might be available as an artifact from the 'perform-regression-analysis' job." 
- fi - exit 1 - fi - - # Continue with the original comparison if no regressions - if (( $(echo "$PR_PASSED >= $TARGET_PASSED" | bc -l) )) && (( $(echo "$PR_PERCENTAGE >= $TARGET_PERCENTAGE" | bc -l) )); then - echo "āœ… PR branch has equal or better test results than target branch" - - # Additional verbose information about improvement - if (( $(echo "$PR_PASSED > $TARGET_PASSED" | bc -l) )); then - IMPROVEMENT=$(( $PR_PASSED - $TARGET_PASSED )) - echo " - Improvement: $IMPROVEMENT more passing tests than target branch" - fi - - if (( $(echo "$PR_PERCENTAGE > $TARGET_PERCENTAGE" | bc -l) )); then - PERCENTAGE_IMPROVEMENT=$(echo "$PR_PERCENTAGE - $TARGET_PERCENTAGE" | bc -l) - echo " - Percentage improvement: +${PERCENTAGE_IMPROVEMENT}% compared to target branch" - fi - - exit 0 - else - echo "āŒ PR branch has worse test results than target branch" - echo " - Passed tests: $PR_PASSED vs $TARGET_PASSED on target branch" - echo " - Pass percentage: $PR_PERCENTAGE% vs $TARGET_PERCENTAGE% on target branch" - - # Add to job summary for general comparison failure - echo "### :x: Test Comparison Failed" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "The PR branch has worse test results than the target branch:" >> $GITHUB_STEP_SUMMARY - echo "- Passed tests: $PR_PASSED (PR) vs $TARGET_PASSED (Target)" >> $GITHUB_STEP_SUMMARY - echo "- Pass percentage: $PR_PERCENTAGE% (PR) vs $TARGET_PERCENTAGE% (Target)" >> $GITHUB_STEP_SUMMARY - - # Calculate regression metrics - if (( $(echo "$PR_PASSED < $TARGET_PASSED" | bc -l) )); then - REGRESSION=$(( $TARGET_PASSED - $PR_PASSED )) - echo " - Regression: $REGRESSION fewer passing tests than target branch" - fi - - if (( $(echo "$PR_PERCENTAGE < $TARGET_PERCENTAGE" | bc -l) )); then - PERCENTAGE_REGRESSION=$(echo "$TARGET_PERCENTAGE - $PR_PERCENTAGE" | bc -l) - echo " - Percentage regression: -${PERCENTAGE_REGRESSION}% compared to target branch" - fi - - exit 1 - fi - - - name: Upload comparison analysis logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: comparison_analysis_logs_${{ github.event.pull_request.number || github.run_id }} - path: debug_*_analysis.log - retention-days: 3 - if-no-files-found: ignore - - perform-regression-analysis: - needs: [test-source-branch, test-target-branch] - uses: ./.github/workflows/meta-regression-analysis.yml - with: - item_type_singular: "test" - item_type_plural: "tests" - pr_number: ${{ github.event.pull_request.number }} - run_id: ${{ github.run_id }} - target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }} - pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }} - - # Conditionally run notification job only if needed - prepare-notification: - name: Prepare Notification Data - needs: - [ - lint, - test-source-branch, - test-target-branch, - compare-results, - perform-regression-analysis, - ] - # Notify on collection errors, no tests found, compare result failure, or if regressions are detected - if: | - always() && - ( - needs.test-source-branch.outputs.collection_errors == 'true' || - needs.test-source-branch.outputs.no_tests_found == 'true' || - needs.compare-results.result == 'failure' || - needs.perform-regression-analysis.outputs.has_regressions == 'true' - ) - runs-on: ${{ inputs.runs_on }} - outputs: - message_body: ${{ steps.construct_notification.outputs.message_body_out }} - ping_user_ids: ${{ steps.construct_notification.outputs.ping_user_ids_out }} - artifact_path: ${{ 
steps.construct_notification.outputs.artifact_path_out }} - should_notify: "true" - webhook_available_for_alert: ${{ steps.check_webhook_availability.outputs.webhook_available }} - - steps: - - name: Check for Discord Webhook URL - id: check_webhook_availability - run: | - if [ -z "${{ secrets.DISCORD_WEBHOOK_URL }}" ]; then - echo "::notice::DISCORD_WEBHOOK_URL secret is not set. Discord notifications will likely be skipped by the alert workflow if it relies on this secret." - echo "webhook_available=false" >> $GITHUB_OUTPUT - else - echo "webhook_available=true" >> $GITHUB_OUTPUT - fi - - name: Download regression details (if any) - id: download_regressions - if: needs.perform-regression-analysis.outputs.has_regressions == 'true' && needs.perform-regression-analysis.outputs.regression_count > 0 - uses: actions/download-artifact@v4 - with: - name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests - path: . # Download to current directory - continue-on-error: true - - - name: Construct Discord Notification - id: construct_notification - env: - LINT_RESULT: ${{ needs.lint.result }} - SOURCE_TEST_RESULT: ${{ needs.test-source-branch.result }} - TARGET_TEST_RESULT: ${{ needs.test-target-branch.result }} - COMPARE_RESULT: ${{ needs.compare-results.result }} - PR_COLLECTION_ERRORS: ${{ needs.test-source-branch.outputs.collection_errors }} - PR_NO_TESTS_FOUND: ${{ needs.test-source-branch.outputs.no_tests_found }} - PR_ERROR_TYPE: ${{ needs.test-source-branch.outputs.error_type }} - PR_ERROR_DETAILS_TRUNCATED: ${{ needs.test-source-branch.outputs.error_details }} - HAS_REGRESSIONS: ${{ needs.perform-regression-analysis.outputs.has_regressions }} - REGRESSION_COUNT: ${{ needs.perform-regression-analysis.outputs.regression_count }} - PR_TOTAL_TESTS: ${{ needs.test-source-branch.outputs.total }} - PR_PASSED_TESTS: ${{ needs.test-source-branch.outputs.passed }} - PR_PERCENTAGE: ${{ needs.test-source-branch.outputs.percentage }} - TARGET_TOTAL_TESTS: ${{ needs.test-target-branch.outputs.total }} - TARGET_PASSED_TESTS: ${{ needs.test-target-branch.outputs.passed }} - TARGET_PERCENTAGE: ${{ needs.test-target-branch.outputs.percentage }} - PR_NUMBER: ${{ github.event.pull_request.number }} - PR_TITLE: ${{ github.event.pull_request.title }} - PR_URL: ${{ github.event.pull_request.html_url }} - TARGET_BRANCH_NAME: ${{ inputs.target_branch_to_compare }} - PR_BRANCH_NAME: ${{ github.head_ref }} - REPO_URL: ${{ github.server_url }}/${{ github.repository }} - ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - GH_ASSIGNEES_JSON: ${{ toJson(github.event.pull_request.assignees) }} - USER_MAP_JSON: ${{ secrets.DISCORD_USER_MAP }} - REGRESSION_FILE_PATH: "regression_details.txt" - DOWNLOAD_REGRESSIONS_OUTCOME: ${{ steps.download_regressions.outcome }} - INPUT_PING_LATEST_COMMITTER: ${{ inputs.ping_latest_committer }} - run: | - # Create debug file for detailed notification construction - exec 3>&1 4>&2 - exec 1> >(tee -a debug_notification_construction.log) 2>&1 - - MESSAGE_LINES=() # Use an array to build message lines - PING_KEYS_OUTPUT="" # Will be comma-separated GitHub logins - ARTIFACT_PATH_OUTPUT="" - - echo "Raw GH_ASSIGNEES_JSON value: [$GH_ASSIGNEES_JSON]" - echo "Raw USER_MAP_JSON value: [$USER_MAP_JSON]" - - # 1. 
Determine Pings - Collect GitHub Logins to pass to alert-discord.yml - # Initialize PING_KEYS_OUTPUT - PING_KEYS_OUTPUT="" - - # Add assignees to PING_KEYS_OUTPUT - if [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && command -v jq &> /dev/null; then - ASSIGNEE_LOGINS_ARRAY=($(echo "$GH_ASSIGNEES_JSON" | jq -r '.[].login // empty')) - echo "GH_ASSIGNEES_JSON received: $GH_ASSIGNEES_JSON" - echo "Extracted ASSIGNEE_LOGINS_ARRAY: (${ASSIGNEE_LOGINS_ARRAY[*]})" - echo "Count of assignees extracted: ${#ASSIGNEE_LOGINS_ARRAY[@]}" - - MAPPED_ASSIGNEE_COUNT=0 - TEMP_PING_KEYS=() - - for assignee_login in "${ASSIGNEE_LOGINS_ARRAY[@]}"; do - if [ -z "$assignee_login" ]; then - echo "Skipping empty assignee login." - continue - fi - echo "Processing assignee for ping: '$assignee_login'" - # Check if this assignee_login exists as a key in USER_MAP_JSON - if echo "$USER_MAP_JSON" | jq -e --arg K "$assignee_login" '.[$K]' > /dev/null; then - echo "Assignee '$assignee_login' FOUND in USER_MAP_JSON." - TEMP_PING_KEYS+=("$assignee_login") - MAPPED_ASSIGNEE_COUNT=$((MAPPED_ASSIGNEE_COUNT + 1)) - else - echo "Assignee '$assignee_login' NOT FOUND in USER_MAP_JSON." - fi - done - - echo "Total assignees found in USER_MAP_JSON and added to pings: $MAPPED_ASSIGNEE_COUNT" - - if [ ${#TEMP_PING_KEYS[@]} -gt 0 ]; then - PING_KEYS_OUTPUT=$(IFS=,; echo "${TEMP_PING_KEYS[*]}") - echo "Initial PING_KEYS_OUTPUT from assignees: [$PING_KEYS_OUTPUT]" - else - echo "No assignees found or GH_ASSIGNEES_JSON was empty, or no assignees were found in USER_MAP_JSON." - fi - elif [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && ! command -v jq &> /dev/null; then - echo "::warning::jq is not available. Cannot determine GitHub users (assignees) for pings." - else - echo "No user map JSON or jq not found. PING_KEYS_OUTPUT (from assignees) will be empty." - fi - - # Add latest committer if INPUT_PING_LATEST_COMMITTER is true - if [[ "$INPUT_PING_LATEST_COMMITTER" == "true" ]]; then - echo "INPUT_PING_LATEST_COMMITTER is true. Attempting to fetch latest committer for PR #${PR_NUMBER}." - if command -v gh &> /dev/null && [ -n "$PR_NUMBER" ]; then - LATEST_COMMITTER_LOGIN_RAW=$(gh pr view "$PR_NUMBER" --json commits --jq '.commits[-1].author.login' 2>/dev/null || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN_RAW" ] && [ "$LATEST_COMMITTER_LOGIN_RAW" != "null" ]; then - # Apply bot filter (e.g., names ending in [bot] or -bot) - LATEST_COMMITTER_LOGIN=$(echo "$LATEST_COMMITTER_LOGIN_RAW" | grep -v -E -i '(\[bot\]$|-bot$)' || echo "") - - if [ -n "$LATEST_COMMITTER_LOGIN" ]; then - echo "Latest committer identified: $LATEST_COMMITTER_LOGIN" - - # Check if this committer is already in PING_KEYS_OUTPUT - ALREADY_IN_LIST=0 - if [ -n "$PING_KEYS_OUTPUT" ]; then # Only check if PING_KEYS_OUTPUT is not empty - IFS=',' read -ra PING_ARRAY <<< "$PING_KEYS_OUTPUT" - for key in "${PING_ARRAY[@]}"; do - if [[ "$key" == "$LATEST_COMMITTER_LOGIN" ]]; then - ALREADY_IN_LIST=1 - break - fi - done - fi - - if [[ "$ALREADY_IN_LIST" -eq 0 ]]; then - if [ -z "$PING_KEYS_OUTPUT" ]; then - PING_KEYS_OUTPUT="$LATEST_COMMITTER_LOGIN" - else - PING_KEYS_OUTPUT="$PING_KEYS_OUTPUT,$LATEST_COMMITTER_LOGIN" - fi - echo "Added latest committer '$LATEST_COMMITTER_LOGIN' to PING_KEYS_OUTPUT. New list: [$PING_KEYS_OUTPUT]" - else - echo "Latest committer '$LATEST_COMMITTER_LOGIN' is already in PING_KEYS_OUTPUT (likely an assignee)." 
- fi - else - echo "Latest committer login '$LATEST_COMMITTER_LOGIN_RAW' was filtered out (likely a bot or pattern match) or empty after filter." - fi - else - echo "No latest committer login found for PR #$PR_NUMBER from gh command, or login was null." - fi - else - if ! command -v gh &> /dev/null; then - echo "::warning::gh command not available. Cannot fetch latest committer." - fi - if [ -z "$PR_NUMBER" ]; then - echo "::warning::PR_NUMBER is not set (event might not be a pull_request). Cannot fetch latest committer." - fi - fi - fi - - # Restore stdout/stderr for GitHub Actions to show final summary - exec 1>&3 2>&4 - - # Make this a standard echo for better visibility of the final list - echo "Final Ping Keys Output (GitHub Logins from test-pytest.yml): [$PING_KEYS_OUTPUT]" - echo "ping_user_ids_out=$PING_KEYS_OUTPUT" >> $GITHUB_OUTPUT - - # Store branch names in variables with proper quoting - PR_BRANCH="${PR_BRANCH_NAME:-unknown}" - TARGET_BRANCH="${TARGET_BRANCH_NAME:-unknown}" - - # 2. Construct Message Body - MESSAGE_LINES+=("**Pytest Comparison & Regression Analysis for PR [#${PR_NUMBER}: ${PR_TITLE}](${PR_URL})**") - MESSAGE_LINES+=("Branch: [\`${PR_BRANCH}\`](${REPO_URL}/tree/${PR_BRANCH}) against [\`${TARGET_BRANCH}\`](${REPO_URL}/tree/${TARGET_BRANCH})") - MESSAGE_LINES+=("---") - - # Job Status Summary - MESSAGE_LINES+=("**Job Status:**") - LINT_STATUS="Success" - if [[ "$LINT_RESULT" == "failure" ]]; then LINT_STATUS="Failed"; elif [[ "$LINT_RESULT" == "skipped" ]]; then LINT_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Linting: $LINT_STATUS") - - SOURCE_TEST_STATUS="Success" - if [[ "$SOURCE_TEST_RESULT" == "failure" ]]; then SOURCE_TEST_STATUS="Failed"; elif [[ "$SOURCE_TEST_RESULT" == "skipped" ]]; then SOURCE_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- PR Branch Tests (\`${PR_BRANCH}\`): $SOURCE_TEST_STATUS") - - TARGET_TEST_STATUS="Success" - if [[ "$TARGET_TEST_RESULT" == "failure" ]]; then TARGET_TEST_STATUS="Failed"; elif [[ "$TARGET_TEST_RESULT" == "skipped" ]]; then TARGET_TEST_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Target Branch Tests (\`${TARGET_BRANCH}\`): $TARGET_TEST_STATUS") - - COMPARE_STATUS="Success" - if [[ "$COMPARE_RESULT" == "failure" ]]; then COMPARE_STATUS="Failed"; elif [[ "$COMPARE_RESULT" == "skipped" ]]; then COMPARE_STATUS="Skipped"; fi - MESSAGE_LINES+=("- Comparison & Regression: $COMPARE_STATUS") - MESSAGE_LINES+=("---") - - # Test Discovery Issues in PR Branch - if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: ERROR: Test Discovery Failed in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Type: \`${PR_ERROR_TYPE}\`") - MESSAGE_LINES+=(" - Details: \`\`\`${PR_ERROR_DETAILS_TRUNCATED}\`\`\`") - MESSAGE_LINES+=(" - This usually indicates import errors or syntax issues preventing tests from being collected.") - elif [[ "$PR_NO_TESTS_FOUND" == "true" ]]; then - MESSAGE_LINES+=("**:warning: WARNING: No Tests Found in PR Branch (\`${PR_BRANCH}\`)**") - MESSAGE_LINES+=(" - Pytest did not discover any test files matching its patterns.") - MESSAGE_LINES+=(" - Ensure your test files are correctly named (e.g., \`test_*.py\` or \`*_test.py\`) and located.") - fi - - # Regression Analysis Summary - if [[ "$HAS_REGRESSIONS" == "true" ]]; then - MESSAGE_LINES+=("**:red_circle: REGRESSIONS DETECTED**") - MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.") - - # Calculate current message length - CURRENT_MESSAGE=$(printf "%s\\n" 
"${MESSAGE_LINES[@]}") - CURRENT_LENGTH=${#CURRENT_MESSAGE} - - if [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then - # Read regression details - REGRESSION_LIST=$(awk '/^[0-9]+\./ {sub(/^[0-9]+\. /, "- "); print}' "$REGRESSION_FILE_PATH") - - # Calculate length with regression details - TEMP_MESSAGE="$CURRENT_MESSAGE" - TEMP_MESSAGE+="\`\`\`" - TEMP_MESSAGE+="$REGRESSION_LIST" - TEMP_MESSAGE+="\`\`\`" - TEMP_LENGTH=${#TEMP_MESSAGE} - - if [ $TEMP_LENGTH -le 2000 ]; then - # If total length would be under 2000 chars, include in message - MESSAGE_LINES+=(" - **Failed Tests (Regressions):**") - MESSAGE_LINES+=("\`\`\`") - MESSAGE_LINES+=("$REGRESSION_LIST") - MESSAGE_LINES+=("\`\`\`") - ARTIFACT_PATH_OUTPUT="" # No artifact if details are inline - else - # If would exceed 2000 chars, attach file instead - MESSAGE_LINES+=(" - Details for the ${REGRESSION_COUNT} regressions are in the attached \`regression_details.txt\` file.") - ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH" - fi - else - MESSAGE_LINES+=(" (Regression details file not found or download failed; cannot list specific regressions here.)") - ARTIFACT_PATH_OUTPUT="" - fi - elif [[ "$COMPARE_RESULT" == "failure" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - # This case handles general comparison failures NOT due to specific regressions - MESSAGE_LINES+=("**:warning: TEST RESULTS DECLINED**") - MESSAGE_LINES+=(" - The PR branch shows a decrease in test success compared to the target branch, but no specific regressions were identified by the \`meta-regression-analysis\` job.") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - elif [[ "$COMPARE_RESULT" == "success" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then - MESSAGE_LINES+=("**:white_check_mark: NO REGRESSIONS DETECTED**") - MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**") - MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**") - fi - - MESSAGE_LINES+=("---") - MESSAGE_LINES+=("[View Workflow Run](${ACTION_RUN_URL})") - - # Construct with actual newlines - FINAL_MESSAGE_BODY=$(printf "%s\\n" "${MESSAGE_LINES[@]}") - if [ ${#MESSAGE_LINES[@]} -gt 0 ]; then - # Remove the very last actual newline - FINAL_MESSAGE_BODY="${FINAL_MESSAGE_BODY%\\n}" - fi - - echo "Final message body prepared in test-pytest.yml" - - echo "message_body_out<> $GITHUB_OUTPUT - echo "$FINAL_MESSAGE_BODY" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - echo "artifact_path_out=$ARTIFACT_PATH_OUTPUT" >> $GITHUB_OUTPUT - - - name: Upload notification construction debug logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: notification_debug_logs_${{ github.event.pull_request.number || github.run_id }} - path: debug_notification_construction.log - retention-days: 3 - if-no-files-found: ignore - - notify-discord: - name: Send Discord Notification - needs: [prepare-notification] - if: | - always() && - needs.prepare-notification.outputs.should_notify == 'true' && - needs.prepare-notification.outputs.webhook_available_for_alert == 'true' - uses: ./.github/workflows/alert-discord.yml - with: - message_body: ${{ needs.prepare-notification.outputs.message_body }} - ping_user_ids: ${{ 
needs.prepare-notification.outputs.ping_user_ids }} - artifact_paths: ${{ needs.prepare-notification.outputs.artifact_path }} - should_notify: ${{ needs.prepare-notification.outputs.should_notify }} - runs_on: ${{ inputs.runs_on }} - secrets: - DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} - DISCORD_USER_MAP: ${{ secrets.DISCORD_USER_MAP }} diff --git a/.github/workflows/test-ts-lint.yml b/.github/workflows/test-ts-lint.yml index 2369565..3843e97 100644 --- a/.github/workflows/test-ts-lint.yml +++ b/.github/workflows/test-ts-lint.yml @@ -30,6 +30,12 @@ on: required: false type: string default: "ubuntu-latest" + target_branch_artifact_name: + required: false + type: string + pr_branch_artifact_name: + required: false + type: string outputs: lint_errors_pr_branch: description: "JSON string of lint errors on the PR branch after attempting fixes." @@ -259,6 +265,8 @@ jobs: item_type_plural: "files with lint errors" pr_number: ${{ github.event.pull_request.number }} run_id: ${{ github.run_id }} + target_branch_artifact_name: ${{ inputs.target_branch_artifact_name }} + pr_branch_artifact_name: ${{ inputs.pr_branch_artifact_name }} # Removed outputs and steps from here, as they are defined by the reusable workflow # and accessed via `needs.analyze-regressions.outputs.*` in subsequent jobs. diff --git a/.github/workflows/tracking-date-reusuable.yml b/.github/workflows/tracking-date-reusuable.yml new file mode 100644 index 0000000..b4ed89e --- /dev/null +++ b/.github/workflows/tracking-date-reusuable.yml @@ -0,0 +1,79 @@ +name: Auto Date Tracking for All Issues + +on: + workflow_dispatch: + +permissions: + issues: write + contents: read + +jobs: + auto-date-tracking: + runs-on: ubuntu-latest + steps: + - name: Set Start and Completion Dates on All Issues + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const owner = context.repo.owner; + const repo = context.repo.repo; + const now = new Date().toISOString().split("T")[0]; + + const issues = await github.paginate( + github.rest.issues.listForRepo, + { + owner, + repo, + state: "all", + per_page: 100 + } + ); + + for (const issue of issues) { + if (issue.pull_request) continue; // skip PRs + + const issue_number = issue.number; + const labels = issue.labels.map(label => label.name); + const startDateComment = `šŸ•’ **Start Date:** ${now}`; + const completeDateComment = `āœ… **Completion Date:** ${now}`; + + const comments = await github.rest.issues.listComments({ + owner, + repo, + issue_number + }); + + const alreadyHasComment = (bodyText) => + comments.data.some(c => + c.body.trim() === bodyText.trim() && + c.user?.type === "Bot" && + c.user?.login === "github-actions[bot]" + ); + + // šŸ•’ Add Start Date if labeled "In Progress" + if (labels.includes("In Progress") && !alreadyHasComment(startDateComment)) { + await github.rest.issues.createComment({ + owner, + repo, + issue_number, + body: startDateComment + }); + console.log(`āœ… Start Date set on issue #${issue_number}`); + } + + // āœ… Add Completion Date when the issue is closed + if ( + issue.state === "closed" && !alreadyHasComment(completeDateComment) + ) { + await github.rest.issues.createComment({ + owner, + repo, + issue_number, + body: completeDateComment + }); + console.log(`āœ… Completion Date set on issue #${issue_number}`); + } + } + + console.log("šŸŽ‰ Finished processing all issues."); \ No newline at end of file diff --git a/README.md b/README.md index 6fa326b..162905d 100644 --- a/README.md +++
b/README.md @@ -9,4 +9,6 @@ - `test-`: Offers an action to run a test suite. *If an action is language/framework specific, a `lang-` section follows the above prefix before the descriptor. ## Triggers: -The `triggers` folder contains trigger actions that go into repositories to trigger the core actions. \ No newline at end of file +The `triggers` folder contains trigger actions that go into repositories to trigger the core actions. +- `on-pr-`: Triggers on PR made to specified branches. +- `on-push-`: Triggers on push made to specified branches. diff --git a/scripts/sync_branch_protection.py b/scripts/sync_branch_protection.py new file mode 100644 index 0000000..1bdf806 --- /dev/null +++ b/scripts/sync_branch_protection.py @@ -0,0 +1,39 @@ +import os +from github import Github + +TOKEN = os.getenv("GH_TOKEN") +g = Github(TOKEN) + +SOURCE_REPO = g.get_repo("JamesonRGrieve/Workflows") +TARGET_REPOS = [ + "AGInfrastructure", "AGInteractive", "AGInterface", "AGInYourPC", "AGIteration", + "nursegpt", "nursegpt_web", "ServerFramework", "auth", "zod2gql", "dynamic-form", "ClientFramework" +] +ORG = "JamesonRGrieve" + +PROTECTED_BRANCHES = ["main", "dev", "legacy"] + +rules = { + "enforce_admins": True, + "required_status_checks": None, + "required_pull_request_reviews": { + "dismiss_stale_reviews": True, + "require_code_owner_reviews": True, + "required_approving_review_count": 1, + }, + "restrictions": None, + "allow_force_pushes": False, + "allow_deletions": False, + "required_linear_history": True, + "required_conversation_resolution": True +} + +for repo_name in TARGET_REPOS: + repo = g.get_repo(f"{ORG}/{repo_name}") + for branch in PROTECTED_BRANCHES: + try: + print(f" Protecting {branch} in {repo_name}") + b = repo.get_branch(branch) + b.edit_protection(**rules) + except Exception as e: + print(f" Error protecting {branch} in {repo_name}: {e}") diff --git a/scripts/sync_labels.py b/scripts/sync_labels.py new file mode 100644 index 0000000..ce4a78a --- /dev/null +++ b/scripts/sync_labels.py @@ -0,0 +1,53 @@ +import requests +import os + +TOKEN = os.getenv("GITHUB_TOKEN") +HEADERS = {"Authorization": f"token {TOKEN}"} + +SOURCE_REPO = "JamesonRGrieve/Workflows" +TARGET_REPOS = [ + "JamesonRGrieve/AGInfrastructure", + "JamesonRGrieve/AGInteractive", + "JamesonRGrieve/AGInterface", + "JamesonRGrieve/AGInYourPC", + "JamesonRGrieve/AGIteration", + "JamesonRGrieve/nursegpt", + "JamesonRGrieve/nursegpt_web", + "JamesonRGrieve/ServerFramework", + "JamesonRGrieve/auth", + "JamesonRGrieve/zod2gql", + "JamesonRGrieve/dynamic-form", + "JamesonRGrieve/ClientFramework" +] + +def get_labels(repo): + url = f"https://api.github.com/repos/{repo}/labels" + res = requests.get(url, headers=HEADERS) + if res.status_code == 200: + return res.json() + print(f" Failed to fetch labels from {repo}") + return [] + +def create_or_update_label(repo, label): + url = f"https://api.github.com/repos/{repo}/labels" + res = requests.post(url, headers=HEADERS, json={ + "name": label["name"], + "color": label["color"], + "description": label.get("description", "") + }) + if res.status_code == 422 and "already_exists" in str(res.text): + print(f" Label '{label['name']}' already exists in {repo}") + elif res.status_code != 201: + print(f" Failed to create label '{label['name']}' in {repo}") + else: + print(f" Synced label '{label['name']}' to {repo}") + +def sync_labels(): + labels = get_labels(SOURCE_REPO) + for target_repo in TARGET_REPOS: + print(f"\nSyncing labels to {target_repo}") + for label in labels: + 
create_or_update_label(target_repo, label) + +if __name__ == "__main__": + sync_labels() \ No newline at end of file diff --git a/scripts/sync_milestones.py b/scripts/sync_milestones.py new file mode 100644 index 0000000..7ca97de --- /dev/null +++ b/scripts/sync_milestones.py @@ -0,0 +1,28 @@ +import os +from github import Github + +TOKEN = os.getenv("GH_TOKEN") +g = Github(TOKEN) + +SOURCE_REPO = g.get_repo("JamesonRGrieve/Workflows") +TARGET_REPOS = [ + "AGInfrastructure", "AGInteractive", "AGInterface", "AGInYourPC", "AGIteration", + "nursegpt", "nursegpt_web", "ServerFramework", "auth", "zod2gql", "dynamic-form", "ClientFramework" +] +ORG = "JamesonRGrieve" + +source_milestones = SOURCE_REPO.get_milestones() + +for repo_name in TARGET_REPOS: + target_repo = g.get_repo(f"{ORG}/{repo_name}") + target_milestones = {m.title: m for m in target_repo.get_milestones(state="all")} + + for m in source_milestones: + if m.title not in target_milestones: + print(f"Creating milestone {m.title} in {repo_name}") + target_repo.create_milestone( + title=m.title, + state=m.state, + description=m.description, + due_on=m.due_on + ) diff --git a/scripts/sync_templates.py b/scripts/sync_templates.py new file mode 100644 index 0000000..815a8a2 --- /dev/null +++ b/scripts/sync_templates.py @@ -0,0 +1,35 @@ +import os +import shutil +from github import Github + +TOKEN = os.getenv("GH_TOKEN") +g = Github(TOKEN) + +SOURCE_REPO = g.get_repo("JamesonRGrieve/Workflows") +TARGET_REPOS = [ + "AGInfrastructure", "AGInteractive", "AGInterface", "AGInYourPC", "AGIteration", + "nursegpt", "nursegpt_web", "ServerFramework", "auth", "zod2gql", "dynamic-form", "ClientFramework" +] +ORG = "JamesonRGrieve" + +TEMPLATE_DIRS = [".github/ISSUE_TEMPLATE", ".github/PULL_REQUEST_TEMPLATE"] + +for repo_name in TARGET_REPOS: + target_repo = g.get_repo(f"{ORG}/{repo_name}") + contents = target_repo.get_contents(".github") + existing_files = [c.path for c in contents] + + for dir_name in TEMPLATE_DIRS: + try: + source_files = SOURCE_REPO.get_contents(dir_name) + for file in source_files: + print(f"\nSyncing {file.path} to {repo_name}") + content = file.decoded_content.decode("utf-8") + target_repo.create_file( + path=file.path, + message=f"sync: update template {file.name}", + content=content, + branch="main" + ) + except Exception as e: + print(f"Skipping {dir_name} for {repo_name}: {e}") diff --git a/triggers/python/on-pr-dev.yml b/triggers/python/on-pr-dev.yml index 7058ad7..78c8a89 100644 --- a/triggers/python/on-pr-dev.yml +++ b/triggers/python/on-pr-dev.yml @@ -7,6 +7,9 @@ on: branches: - dev +permissions: + contents: write + jobs: debug-pr-context: # New job for debugging name: Debug PR Context diff --git a/triggers/python/on-pr-main.yml b/triggers/python/on-pr-main.yml index 05da9ee..4e0a57f 100644 --- a/triggers/python/on-pr-main.yml +++ b/triggers/python/on-pr-main.yml @@ -16,7 +16,6 @@ jobs: id: check_branch run: | IS_FROM_DEV="false" - # For pull requests, check if the source branch is dev if [[ "${{ github.event_name }}" == "pull_request" && "${{ github.head_ref }}" == "dev" ]]; then IS_FROM_DEV="true" echo "This is a pull request coming from the dev branch into main." @@ -28,26 +27,30 @@ jobs: run-tests: name: Run Pytest Comparison - # No explicit check_pr_source dependency, tests should run for any PR to main. - # If tests pass and PR is from dev, then downstream will run. uses: JamesonRGrieve/Workflows/.github/workflows/test-pytest.yml@main with: - # PR is to main, so compare PR branch against main. 
- target_branch_to_compare: ${{ github.base_ref }} # This will be 'main' + target_branch_to_compare: ${{ github.base_ref }} permissions: - contents: write # For lint within pytest + contents: write + + run-bandit-check: + name: Run Bandit Regression Workflow + uses: JamesonRGrieve/Workflows/.github/workflows/test-bandit.yml@main + with: + target_branch_to_compare: ${{ github.base_ref }} # Usually 'main' in PRs to main + permissions: + contents: read call-reusable-stage-downstream: name: Call Reusable Stage Downstream Workflow - needs: [check-pr-source, run-tests] # Ensures tests pass and branch is correct before staging + needs: [check-pr-source, run-tests, run-bandit-check] if: needs.check-pr-source.outputs.is_from_dev == 'true' uses: JamesonRGrieve/Workflows/.github/workflows/test-downstream.yml@main with: - source_repo_ref: "dev" # Staging happens from the 'dev' branch of the current repo + source_repo_ref: "dev" secrets: PAT_TOKEN: ${{ secrets.PAT_TOKEN }} DOWNSTREAM_REPOSITORIES: ${{ secrets.DOWNSTREAM_REPOSITORIES }} permissions: contents: write pull-requests: write - # Permissions are handled by the reusable workflow itself. diff --git a/triggers/python/on-push-other.yml b/triggers/python/on-push-other.yml index e956325..7c97c3b 100644 --- a/triggers/python/on-push-other.yml +++ b/triggers/python/on-push-other.yml @@ -14,3 +14,5 @@ jobs: # Permissions are handled by the reusable workflow itself or should be inherited. # If secrets were needed, they would be passed here like: # secrets: inherit + permissions: + contents: write diff --git a/triggers/typescript/nextjs/lint.yml b/triggers/typescript/nextjs/lint.yml index 1ea4902..488a780 100644 --- a/triggers/typescript/nextjs/lint.yml +++ b/triggers/typescript/nextjs/lint.yml @@ -9,8 +9,11 @@ jobs: call-reusable-lint: name: Call Reusable Lint Workflow - uses: JamesonRGrieve/Workflows/.github/workflows/test-lint-ts.yml@main + uses: JamesonRGrieve/Workflows/.github/workflows/test-ts-lint.yml@main # No specific inputs needed for the reusable lint workflow # Permissions are handled by the reusable workflow itself or should be inherited. # If secrets were needed, they would be passed here like: # secrets: inherit + lint_command: npm run lint + fix_command: npm run lint -- --fix + diff --git a/triggers/typescript/nextjs/manage-submodules.yml b/triggers/typescript/nextjs/manage-submodules.yml index 5008bc0..cc1436c 100644 --- a/triggers/typescript/nextjs/manage-submodules.yml +++ b/triggers/typescript/nextjs/manage-submodules.yml @@ -11,7 +11,7 @@ jobs: name: Call Reusable Submodule Management # Make sure the reusable workflow is in the same repository at this path # or use OWNER/REPO/.github/workflows/manage-submodules.yml@REF if it's in a central repo. - uses: JamesonRGrieve/Workflows/.github/workflows/manage-submodules.yml@main + uses: JamesonRGrieve/Workflows/.github/workflows/gh-submodules.yml@main with: # REQUIRED: Customize this list of comma-separated file paths # These are the files that will be synced from the main repository to the submodules diff --git a/triggers/typescript/nextjs/test-downstream.yml b/triggers/typescript/nextjs/test-downstream.yml index e9df331..5d69c14 100644 --- a/triggers/typescript/nextjs/test-downstream.yml +++ b/triggers/typescript/nextjs/test-downstream.yml @@ -30,7 +30,7 @@ jobs: name: Run Pytest Comparison # No explicit check_pr_source dependency, tests should run for any PR to main. # If tests pass and PR is from dev, then downstream will run.
- uses: JamesonRGrieve/Workflows/.github/workflows/test-pytest.yml@main + uses: JamesonRGrieve/Workflows/.github/workflows/test-py-pytest.yml@main with: # PR is to main, so compare PR branch against main. target_branch_to_compare: ${{ github.base_ref }} # This will be 'main' @@ -41,7 +41,7 @@ jobs: name: Call Reusable Stage Downstream Workflow needs: [check-pr-source, run-tests] # Ensures tests pass and branch is correct before staging if: needs.check-pr-source.outputs.is_from_dev == 'true' - uses: JamesonRGrieve/Workflows/.github/workflows/test-downstream.yml@main + uses: JamesonRGrieve/Workflows/.github/workflows/gh-test-downstream.yml@main with: source_repo_ref: 'dev' # Staging happens from the 'dev' branch of the current repo secrets: diff --git a/usage.mmd b/usage.mmd new file mode 100644 index 0000000..fcaf4b4 --- /dev/null +++ b/usage.mmd @@ -0,0 +1,71 @@ +flowchart LR + TIME["on-day"] + subgraph CM ["Common"] + PR_DEV["on-pr-dev"] + PR_MAIN["on-pr-main"] + PUSH["on-push"] + PUBLISH["on-publish"] + ISSUE_CLOSE["on-issue-close"] + ISSUE["on-issue"] + subgraph FE ["Frontend Repositories"] + FE_PUSH["on-push"] + FE_PUBLISH["on-publish"] + end + subgraph BE ["Backend Repositories"] + BE_PUSH["on-push"] + BE_PUBLISH["on-publish"] + end + end + subgraph WF ["Workflows Repository"] + WF_BUILD_NEXT["build-next"] + WF_ALERT_DISCORD["alert-discord"] + WF_GH_PR_ASSIGNEES["gh-pr-assignees"] + WF_GH_PROJECTS_QA["gh-projects-qa #4"] + WF_GH_PROJECTS_TIME["gh-projects-time #2"] + WF_GH_PROJECTS_BLOCKERS["gh-projects-blockers #3"] + WF_GH_SUBMODULES["gh-submodules"] + WF_GH_TRIGGER_SYNC["gh-trigger-sync #6"] + WF_GH_META_SYNC["gh-meta-sync #5"] + WF_GH_TEST_DOWNSTREAM["gh-test-downstream"] + WF_META_REGRESSION["meta-regression-analysis"] + WF_PUBLISH_GHCR["publish-ghcr"] + WF_PUBLISH_PY_PYPI["publish-py-pypi"] + WF_PUBLISH_STORYBOOK["publish-storybook"] + WF_PUBLISH_TS_NPM["publish-ts-npm"] + WF_TEST_PY_LINT["test-py-lint"] + WF_TEST_PY_PYTEST["test-py-pytest"] + WF_TEST_STORYBOOK["test-storybook"] + WF_TEST_TS_LINT["test-ts-lint"] + end + + DONE["Passed - If PR, Mergable"] + + FE_PUSH --> WF_TEST_TS_LINT + WF_TEST_TS_LINT -.-> WF_BUILD_NEXT + WF_BUILD_NEXT -.-> WF_TEST_STORYBOOK + WF_TEST_STORYBOOK -.-> WF_META_REGRESSION + FE_PUBLISH -->|If has package| WF_PUBLISH_TS_NPM + + BE_PUSH --> WF_TEST_PY_LINT + WF_TEST_PY_LINT -.-> WF_TEST_PY_PYTEST + WF_TEST_PY_PYTEST -.-> WF_META_REGRESSION + BE_PUBLISH --> WF_PUBLISH_PY_PYPI + + PUSH --> |Push to dev or main
Require meta-regression-analysis| WF_PUBLISH_GHCR + PR_MAIN --> WF_GH_TEST_DOWNSTREAM + PUBLISH -->|Require meta-regression-analysis| WF_PUBLISH_GHCR + PUSH --> WF_GH_TRIGGER_SYNC + PR_MAIN --> WF_GH_SUBMODULES + PR_DEV --> WF_GH_SUBMODULES + PR_MAIN --> WF_GH_PR_ASSIGNEES + PR_DEV --> WF_GH_PR_ASSIGNEES + ISSUE_CLOSE --> WF_GH_PROJECTS_TIME + ISSUE --> WF_GH_PROJECTS_BLOCKERS + WF_META_REGRESSION -.->|Pass
If push to dev or main| WF_PUBLISH_GHCR + WF_PUBLISH_GHCR --> DONE + WF_META_REGRESSION -->|Pass
If PR to dev| DONE + WF_META_REGRESSION -.->|Pass
If storybook| WF_PUBLISH_STORYBOOK + WF_META_REGRESSION -.->|Fail| WF_ALERT_DISCORD + WF_GH_TRIGGER_SYNC -.-> WF_GH_META_SYNC + + TIME --> WF_GH_PROJECTS_QA \ No newline at end of file