diff --git a/.github/workflows/run-branch-test.yml b/.github/workflows/run-branch-test.yml
index 678e834..dc217bc 100644
--- a/.github/workflows/run-branch-test.yml
+++ b/.github/workflows/run-branch-test.yml
@@ -95,7 +95,7 @@ jobs:
         id: cache-keys
         run: |
           # Version bump forces cache invalidation when extraction logic changes
-          CACHE_VERSION="v3"
+          CACHE_VERSION="v4"
           BASE_KEY="pytest-${CACHE_VERSION}-${{ inputs.target_branch }}-${{ github.event.pull_request.base.sha || github.sha }}"
           echo "base_key=$BASE_KEY" >> $GITHUB_OUTPUT
           echo "pending_key=${BASE_KEY}-pending-${{ github.run_id }}" >> $GITHUB_OUTPUT
@@ -360,37 +360,39 @@ jobs:
               with open('results.json') as f:
                   results = json.load(f)

-              if results.get('exitcode', 0) <= 1 and 'summary' in results:
+              # Extract results regardless of exit code - we want to capture
+              # whatever tests ran, even if pytest had errors
+              if 'summary' in results:
                   summary = results['summary']
                   total = summary.get('total', 0)
                   passed = summary.get('passed', 0)

-                  for test in results.get('tests', []):
-                      outcome = test.get('outcome')
-                      nodeid = test.get('nodeid', '')
-                      if not nodeid:
-                          continue
-                      all_tests.append(nodeid)
-                      if outcome == 'passed':
-                          passing_tests.append(nodeid)
-                      elif outcome == 'failed':
-                          failing_tests.append(nodeid)
-                      elif outcome == 'error':
-                          error_tests.append(nodeid)
-                      elif outcome == 'skipped':
-                          skipped_tests.append(nodeid)
-                          reason = test.get('longrepr', 'No reason')
-                          if isinstance(reason, list):
-                              reason = reason[0] if reason else 'No reason'
-                          skipped_with_reasons[nodeid] = str(reason).strip()
-                      elif outcome == 'xfailed':
-                          xfailed_tests.append(nodeid)
-                          reason = test.get('longrepr', 'No reason')
-                          if isinstance(reason, list):
-                              reason = reason[0] if reason else 'No reason'
-                          xfailed_with_reasons[nodeid] = str(reason).strip()
-                      elif outcome == 'xpassed':
-                          xpassed_tests.append(nodeid)
+              for test in results.get('tests', []):
+                  outcome = test.get('outcome')
+                  nodeid = test.get('nodeid', '')
+                  if not nodeid:
+                      continue
+                  all_tests.append(nodeid)
+                  if outcome == 'passed':
+                      passing_tests.append(nodeid)
+                  elif outcome == 'failed':
+                      failing_tests.append(nodeid)
+                  elif outcome == 'error':
+                      error_tests.append(nodeid)
+                  elif outcome == 'skipped':
+                      skipped_tests.append(nodeid)
+                      reason = test.get('longrepr', 'No reason')
+                      if isinstance(reason, list):
+                          reason = reason[0] if reason else 'No reason'
+                      skipped_with_reasons[nodeid] = str(reason).strip()
+                  elif outcome == 'xfailed':
+                      xfailed_tests.append(nodeid)
+                      reason = test.get('longrepr', 'No reason')
+                      if isinstance(reason, list):
+                          reason = reason[0] if reason else 'No reason'
+                      xfailed_with_reasons[nodeid] = str(reason).strip()
+                  elif outcome == 'xpassed':
+                      xpassed_tests.append(nodeid)

               percentage = (passed / total * 100) if total > 0 else 0
           except FileNotFoundError:
diff --git a/.github/workflows/test-py-pytest.yml b/.github/workflows/test-py-pytest.yml
index 22d65a4..b366f33 100644
--- a/.github/workflows/test-py-pytest.yml
+++ b/.github/workflows/test-py-pytest.yml
@@ -202,37 +202,39 @@ jobs:
               with open('results.json') as f:
                   results = json.load(f)

-              if results.get('exitcode', 0) <= 1 and 'summary' in results:
+              # Extract results regardless of exit code - we want to capture
+              # whatever tests ran, even if pytest had errors
+              if 'summary' in results:
                   summary = results['summary']
                   total = summary.get('total', 0)
                   passed = summary.get('passed', 0)

-                  for test in results.get('tests', []):
-                      outcome = test.get('outcome')
-                      nodeid = test.get('nodeid', '')
-                      if not nodeid:
-                          continue
-                      all_tests.append(nodeid)
-                      if outcome == 'passed':
-                          passing_tests.append(nodeid)
-                      elif outcome == 'failed':
-                          failing_tests.append(nodeid)
-                      elif outcome == 'error':
-                          error_tests.append(nodeid)
-                      elif outcome == 'skipped':
-                          skipped_tests.append(nodeid)
-                          reason = test.get('longrepr', 'No reason')
-                          if isinstance(reason, list):
-                              reason = reason[0] if reason else 'No reason'
-                          skipped_with_reasons[nodeid] = str(reason).strip()
-                      elif outcome == 'xfailed':
-                          xfailed_tests.append(nodeid)
-                          reason = test.get('longrepr', 'No reason')
-                          if isinstance(reason, list):
-                              reason = reason[0] if reason else 'No reason'
-                          xfailed_with_reasons[nodeid] = str(reason).strip()
-                      elif outcome == 'xpassed':
-                          xpassed_tests.append(nodeid)
+              for test in results.get('tests', []):
+                  outcome = test.get('outcome')
+                  nodeid = test.get('nodeid', '')
+                  if not nodeid:
+                      continue
+                  all_tests.append(nodeid)
+                  if outcome == 'passed':
+                      passing_tests.append(nodeid)
+                  elif outcome == 'failed':
+                      failing_tests.append(nodeid)
+                  elif outcome == 'error':
+                      error_tests.append(nodeid)
+                  elif outcome == 'skipped':
+                      skipped_tests.append(nodeid)
+                      reason = test.get('longrepr', 'No reason')
+                      if isinstance(reason, list):
+                          reason = reason[0] if reason else 'No reason'
+                      skipped_with_reasons[nodeid] = str(reason).strip()
+                  elif outcome == 'xfailed':
+                      xfailed_tests.append(nodeid)
+                      reason = test.get('longrepr', 'No reason')
+                      if isinstance(reason, list):
+                          reason = reason[0] if reason else 'No reason'
+                      xfailed_with_reasons[nodeid] = str(reason).strip()
+                  elif outcome == 'xpassed':
+                      xpassed_tests.append(nodeid)

               percentage = (passed / total * 100) if total > 0 else 0
           except FileNotFoundError:
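Both hunks share the same extraction logic, now run unconditionally rather than behind the old `exitcode <= 1` guard (pytest exit codes above 1 signal interruption, internal error, bad usage, or an empty collection, and the guard threw away any partial results the JSON report still contained). The following is a minimal standalone sketch of that logic, assuming `results.json` follows pytest-json-report's schema (`exitcode`, `summary.total`/`summary.passed`, and a `tests` list of `nodeid`/`outcome`/`longrepr` entries). `SAMPLE_REPORT` and `classify_results` are illustrative names, not part of either workflow, and the separate skipped/xfailed reason dicts are collapsed into one for brevity.

```python
# Minimal sketch of the extraction logic shared by both workflows.
# Assumes results.json follows pytest-json-report's schema; the sample
# report and the classify_results() helper are illustrative only.
import json

SAMPLE_REPORT = {
    "exitcode": 2,  # pytest had errors, but we still capture what ran
    "summary": {"total": 3, "passed": 1},
    "tests": [
        {"nodeid": "tests/test_a.py::test_ok", "outcome": "passed"},
        {"nodeid": "tests/test_a.py::test_bad", "outcome": "failed"},
        {"nodeid": "tests/test_b.py::test_skip", "outcome": "skipped",
         "longrepr": ["tests/test_b.py", 7, "Skipped: flaky on CI"]},
    ],
}

def classify_results(results):
    """Bucket node IDs by outcome, mirroring the workflows' parser."""
    buckets = {k: [] for k in
               ("passed", "failed", "error", "skipped", "xfailed", "xpassed")}
    reasons = {}
    for test in results.get("tests", []):
        nodeid = test.get("nodeid", "")
        outcome = test.get("outcome")
        if not nodeid or outcome not in buckets:
            continue
        buckets[outcome].append(nodeid)
        if outcome in ("skipped", "xfailed"):
            reason = test.get("longrepr", "No reason")
            if isinstance(reason, list):
                # longrepr may arrive as a (path, lineno, message) triple;
                # the workflows take the first element, as done here
                reason = reason[0] if reason else "No reason"
            reasons[nodeid] = str(reason).strip()
    summary = results.get("summary", {})
    total, passed = summary.get("total", 0), summary.get("passed", 0)
    percentage = (passed / total * 100) if total > 0 else 0
    return buckets, reasons, percentage

buckets, reasons, pct = classify_results(SAMPLE_REPORT)
print(json.dumps(buckets, indent=2), f"{pct:.1f}% passed")
```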