
Apply pytest improvements to storybook workflow #71

Workflow file for this run

name: Reusable Compare Mocha Results
on:
workflow_call:
inputs:
target_branch_to_compare:
description: "The target branch to compare against (e.g., main, refs/heads/main)."
required: true
type: string
node-version:
description: "Node.js version to use for testing."
required: false
type: string
default: "18"
install-command:
description: "Optional command to install dependencies (defaults to npm/pnpm/yarn auto-detection)."
required: false
type: string
default: ""
mocha-command:
description: "Base command used to invoke Mocha (e.g., npx mocha, pnpm exec mocha)."
required: false
type: string
default: "npx mocha"
mocha-extra-args:
description: "Additional arguments to pass to the Mocha command (applied before workflow-managed flags)."
required: false
type: string
default: "--reporter json"
working-directory:
description: "Directory where install and test commands should be executed."
required: false
type: string
default: "."
ping_latest_committer:
description: "If true, the latest committer on the PR will be added to the ping list."
required: false
type: boolean
default: false
runs_on:
description: "Runner labels for all jobs, provided as a JSON array string (parsed with fromJSON)."
required: false
type: string
default: '["self-hosted", "multithreaded"]'
secrets:
DISCORD_WEBHOOK_URL:
description: "Discord Webhook URL for failure notifications. If not provided, notifications are skipped."
required: false
DISCORD_USER_MAP:
description: 'JSON string mapping GitHub usernames to Discord User IDs (e.g., {"user1":"id1"}). If not provided, users won''t be pinged.'
required: false
outputs:
pr_total:
description: "Total tests in PR/source branch"
value: ${{ jobs.test-source-branch.outputs.total }}
pr_passed:
description: "Passed tests in PR/source branch"
value: ${{ jobs.test-source-branch.outputs.passed }}
pr_percentage:
description: "Pass percentage in PR/source branch"
value: ${{ jobs.test-source-branch.outputs.percentage }}
pr_collection_errors:
description: "PR branch has collection errors"
value: ${{ jobs.test-source-branch.outputs.collection_errors }}
pr_no_tests_found:
description: "PR branch has no tests found"
value: ${{ jobs.test-source-branch.outputs.no_tests_found }}
target_total:
description: "Total tests in target branch"
value: ${{ jobs.test-target-branch.outputs.total }}
target_passed:
description: "Passed tests in target branch"
value: ${{ jobs.test-target-branch.outputs.passed }}
target_percentage:
description: "Pass percentage in target branch"
value: ${{ jobs.test-target-branch.outputs.percentage }}
has_regressions:
description: "Boolean indicating if regressions were found"
value: ${{ jobs.compare-results.outputs.has_regressions }}
regression_count:
description: "Number of test regressions found"
value: ${{ jobs.compare-results.outputs.regression_count }}
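# Example caller (illustrative sketch only — the job name, Node version, and Mocha
# arguments below are assumptions, not requirements of this workflow):
#
#   jobs:
#     compare-mocha:
#       uses: ./.github/workflows/test-js-mocha.yml
#       with:
#         target_branch_to_compare: main
#         node-version: "20"
#         mocha-command: "npx mocha"
#         mocha-extra-args: "--reporter json --recursive"
#         ping_latest_committer: true
#       secrets:
#         DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }}
#         DISCORD_USER_MAP: ${{ secrets.DISCORD_USER_MAP }}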
jobs:
test-source-branch:
runs-on: ${{ fromJSON(inputs.runs_on) }}
outputs:
total: ${{ steps.extract-results.outputs.total }}
passed: ${{ steps.extract-results.outputs.passed }}
percentage: ${{ steps.extract-results.outputs.percentage }}
collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }}
no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }}
has_errors: ${{ steps.check-collection.outputs.has_errors }}
error_type: ${{ steps.check-collection.outputs.error_type }}
error_details: ${{ steps.check-collection.outputs.error_details }}
failing_count: ${{ steps.extract-results.outputs.failing_count }}
skipped_count: ${{ steps.extract-results.outputs.skipped_count }}
xfailed_count: ${{ steps.extract-results.outputs.xfailed_count }}
steps:
- name: Checkout PR Branch
uses: actions/checkout@v4.2.2
with:
submodules: "recursive"
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "${{ inputs['node-version'] }}"
- name: Install dependencies
run: |
set -e
if command -v corepack >/dev/null 2>&1; then
corepack enable >/dev/null 2>&1 || true
fi
INSTALL_COMMAND="${{ inputs['install-command'] }}"
if [ -n "$INSTALL_COMMAND" ]; then
echo "Running custom install command: $INSTALL_COMMAND"
eval "$INSTALL_COMMAND"
elif [ -f package-lock.json ]; then
echo "Detected package-lock.json; running npm ci"
npm ci
elif [ -f yarn.lock ]; then
if command -v yarn >/dev/null 2>&1; then
echo "Detected yarn.lock; running yarn install --frozen-lockfile"
yarn install --frozen-lockfile
else
echo "::warning::yarn.lock detected but yarn is unavailable. Falling back to npm install."
npm install
fi
elif [ -f pnpm-lock.yaml ]; then
if command -v pnpm >/dev/null 2>&1; then
echo "Detected pnpm-lock.yaml; running pnpm install --frozen-lockfile"
pnpm install --frozen-lockfile
else
echo "::warning::pnpm-lock.yaml detected but pnpm is unavailable. Falling back to npm install."
npm install
fi
else
echo "No lockfile detected; running npm install"
npm install
fi
working-directory: ${{ inputs['working-directory'] }}
- name: Check for test collection errors
id: check-collection
run: |
echo "Running Mocha collection check..."
HAS_COLLECTION_ERRORS="false"
NO_TESTS_FOUND="false"
ERROR_TYPE="none"
ERROR_DETAILS="none"
MOCHA_COMMAND="${{ inputs['mocha-command'] }}"
MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}"
# Ensure the Mocha command includes a JSON reporter so that discovery output is machine readable
if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then
MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json"
fi
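# Mocha's --dry-run flag (available in recent Mocha releases) reports the tests it
# would run without executing them; the JSON output is parsed below to count what
# was discovered.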
COLLECTION_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS --dry-run"
echo "Executing: $COLLECTION_COMMAND"
set +e
eval "$COLLECTION_COMMAND" > collection_output.json 2> collection_output.txt
EXIT_CODE=$?
set -e
if [ "$EXIT_CODE" -ne 0 ]; then
HAS_COLLECTION_ERRORS="true"
if grep -qi "Cannot find module" collection_output.txt; then
ERROR_TYPE="ModuleNotFoundError"
elif grep -qi "SyntaxError" collection_output.txt; then
ERROR_TYPE="SyntaxError"
elif grep -qi "TypeError" collection_output.txt; then
ERROR_TYPE="TypeError"
elif grep -qi "ReferenceError" collection_output.txt; then
ERROR_TYPE="ReferenceError"
else
ERROR_TYPE="ExecutionError"
fi
ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g')
echo "::error::Test discovery errors detected in PR branch via Mocha ($ERROR_TYPE)"
echo "::error::First details: ${ERROR_DETAILS:0:200}..."
else
node <<'NODE'
const fs = require('fs');
const path = require('path');
const outputPath = path.resolve(process.cwd(), 'collection_output.json');
let discoveredCount = 0;
try {
const raw = fs.readFileSync(outputPath, 'utf-8');
if (raw.trim()) {
const data = JSON.parse(raw);
if (data && data.stats && typeof data.stats.tests === 'number') {
discoveredCount = data.stats.tests;
} else if (Array.isArray(data.tests)) {
discoveredCount = data.tests.length;
}
}
} catch (error) {
console.log(`::warning::Unable to parse Mocha discovery JSON: ${error.message}`);
}
fs.writeFileSync(
process.env.GITHUB_OUTPUT,
`tests_discovered=${discoveredCount}\n`,
{ flag: 'a' }
);
NODE
TEST_COUNT=$(grep -o "tests_discovered=[0-9]*" $GITHUB_OUTPUT | tail -1 | cut -d'=' -f2)
if [ -z "$TEST_COUNT" ]; then
TEST_COUNT=0
fi
if [ "$TEST_COUNT" = "0" ]; then
NO_TESTS_FOUND="true"
ERROR_TYPE="NoTestsFound"
ERROR_DETAILS="Mocha --dry-run did not discover any tests"
echo "::warning::No tests were found in the PR branch"
else
echo "Found $TEST_COUNT test(s) in PR branch"
fi
fi
echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT
echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT
echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT
echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT
if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then
echo "has_errors=true" >> $GITHUB_OUTPUT
else
echo "has_errors=false" >> $GITHUB_OUTPUT
fi
if [[ "$HAS_COLLECTION_ERRORS" == "true" ]]; then
echo "❌ Discovery Error: $ERROR_TYPE"
elif [[ "$NO_TESTS_FOUND" == "true" ]]; then
echo "⚠️ No Tests Found"
else
echo "✅ Discovery Success"
fi
working-directory: ${{ inputs['working-directory'] }}
- name: Run tests on PR Branch
if: steps.check-collection.outputs.has_collection_errors != 'true'
run: |
echo "Running Mocha tests on PR branch..."
MOCHA_COMMAND="${{ inputs['mocha-command'] }}"
MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}"
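# As in the discovery step, make sure a JSON reporter is present so pr_results.json can be parsed downstream.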
if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then
MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json"
fi
TEST_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS"
echo "Executing: $TEST_COMMAND"
set +e
eval "$TEST_COMMAND" > >(tee pr_results.json) 2> test_output.txt
EXIT_CODE=$?
set -e
if [ -s pr_results.json ]; then
echo "✅ Test execution completed (exit code: $EXIT_CODE)"
else
echo "❌ Test execution did not produce pr_results.json (exit code: $EXIT_CODE)"
fi
working-directory: ${{ inputs['working-directory'] }}
- name: Extract test results and create artifacts
id: extract-results
run: |
echo "PR_BRANCH=$(git rev-parse --abbrev-ref HEAD)" >> $GITHUB_ENV
node <<'NODE'
const fs = require('fs');
const path = require('path');
const resultsPath = path.resolve(process.cwd(), 'pr_results.json');
let prTotal = 0;
let prPassed = 0;
let prPercentage = 0;
const passingTests = [];
const failingTests = [];
const skippedTests = [];
const xfailedTests = [];
const skippedTestsWithReasons = {};
const xfailedTestsWithReasons = {};
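// "xfail" is a pytest concept with no direct Mocha equivalent; these arrays stay empty
// but are kept so the artifact shape matches what the shared regression workflows expect.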
const allTests = [];
const warningsList = [];
const safeRead = (filePath) => {
try {
return fs.readFileSync(filePath, 'utf-8');
} catch (error) {
return null;
}
};
const rawResults = safeRead(resultsPath);
if (rawResults) {
try {
const data = JSON.parse(rawResults);
const stats = data?.stats ?? {};
const tests = Array.isArray(data?.tests) ? data.tests : [];
prTotal = Number.isFinite(stats.tests) ? Number(stats.tests) : tests.length;
prPassed = Number.isFinite(stats.passes)
? Number(stats.passes)
: Array.isArray(data?.passes)
? data.passes.length
: 0;
for (const test of tests) {
const suitePath = test.file ? path.relative(process.cwd(), test.file) || test.file : '';
const fullTitle = test.fullTitle || test.title || 'Unnamed test';
const identifier = suitePath ? `${suitePath}::${fullTitle}` : fullTitle;
const state = test.state || (test.pending ? 'pending' : undefined);
allTests.push(identifier);
switch (state) {
case 'passed':
passingTests.push(identifier);
break;
case 'failed':
failingTests.push(identifier);
break;
case 'pending':
skippedTests.push(identifier);
skippedTestsWithReasons[identifier] =
(test.err && test.err.message) || 'Marked as pending in Mocha output';
break;
default: {
const matchBy = (collection) =>
Array.isArray(collection)
? collection.find(
(item) =>
item &&
item.fullTitle === test.fullTitle &&
(item.file === test.file || (!item.file && !test.file)),
)
: undefined;
if (!state) {
if (matchBy(data?.failures)) {
failingTests.push(identifier);
break;
}
if (matchBy(data?.passes)) {
passingTests.push(identifier);
break;
}
const pendingMatch = matchBy(data?.pending);
if (pendingMatch) {
skippedTests.push(identifier);
skippedTestsWithReasons[identifier] =
(pendingMatch.err && pendingMatch.err.message) || 'Marked as pending in Mocha output';
break;
}
}
skippedTests.push(identifier);
skippedTestsWithReasons[identifier] = 'Test state unknown; treated as skipped';
break;
}
}
}
if (!prPassed && passingTests.length) {
prPassed = passingTests.length;
}
if (prTotal > 0) {
prPercentage = (prPassed / prTotal) * 100;
}
} catch (error) {
console.error(`Failed to parse Mocha JSON results: ${error.message}`);
}
}
const summary = {
total: prTotal,
passed: prPassed,
percentage: Number.isFinite(prPercentage) ? prPercentage.toFixed(2) : '0.00',
};
const output = {
summary,
passing_tests: passingTests,
failing_tests: failingTests,
skipped_tests: skippedTests,
xfailed_tests: xfailedTests,
all_tests: allTests,
skipped_tests_with_reasons: skippedTestsWithReasons,
xfailed_tests_with_reasons: xfailedTestsWithReasons,
warnings: warningsList,
};
fs.writeFileSync('pr_test_data.json', JSON.stringify(output, null, 2));
const outputLines = [
`total=${prTotal}`,
`passed=${prPassed}`,
`percentage=${Number.isFinite(prPercentage) ? prPercentage.toFixed(2) : '0.00'}`,
`failing_count=${failingTests.length}`,
`skipped_count=${skippedTests.length}`,
`xfailed_count=${xfailedTests.length}`,
];
fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`);
NODE
echo "✅ Test results: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} passed (${{ steps.extract-results.outputs.percentage }}%)"
working-directory: ${{ inputs['working-directory'] }}
- name: Upload PR branch artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }}
path: |
${{ inputs['working-directory'] }}/pr_test_data.json
${{ inputs['working-directory'] }}/test_output.txt
${{ inputs['working-directory'] }}/pr_results.json
${{ inputs['working-directory'] }}/collection_output.json
${{ inputs['working-directory'] }}/collection_output.txt
retention-days: 3
if-no-files-found: ignore
test-target-branch:
runs-on: ${{ fromJSON(inputs.runs_on) }}
outputs:
total: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.total || steps.extract-results.outputs.total }}
passed: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passed || steps.extract-results.outputs.passed }}
percentage: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.percentage || steps.extract-results.outputs.percentage }}
collection_errors: ${{ steps.check-collection.outputs.has_collection_errors }}
no_tests_found: ${{ steps.check-collection.outputs.no_tests_found }}
has_errors: ${{ steps.check-collection.outputs.has_errors }}
error_type: ${{ steps.check-collection.outputs.error_type }}
error_details: ${{ steps.check-collection.outputs.error_details }}
passing_count: ${{ steps.check-collection.outputs.has_collection_errors == 'true' && steps.set-error-outputs.outputs.passing_count || steps.extract-results.outputs.passing_count }}
steps:
- name: Checkout target branch
uses: actions/checkout@v4.2.2
with:
submodules: "recursive"
ref: ${{ inputs.target_branch_to_compare }}
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "${{ inputs['node-version'] }}"
- name: Install dependencies
run: |
set -e
if command -v corepack >/dev/null 2>&1; then
corepack enable >/dev/null 2>&1 || true
fi
INSTALL_COMMAND="${{ inputs['install-command'] }}"
if [ -n "$INSTALL_COMMAND" ]; then
echo "Running custom install command: $INSTALL_COMMAND"
eval "$INSTALL_COMMAND"
elif [ -f package-lock.json ]; then
echo "Detected package-lock.json; running npm ci"
npm ci
elif [ -f yarn.lock ]; then
if command -v yarn >/dev/null 2>&1; then
echo "Detected yarn.lock; running yarn install --frozen-lockfile"
yarn install --frozen-lockfile
else
echo "::warning::yarn.lock detected but yarn is unavailable. Falling back to npm install."
npm install
fi
elif [ -f pnpm-lock.yaml ]; then
if command -v pnpm >/dev/null 2>&1; then
echo "Detected pnpm-lock.yaml; running pnpm install --frozen-lockfile"
pnpm install --frozen-lockfile
else
echo "::warning::pnpm-lock.yaml detected but pnpm is unavailable. Falling back to npm install."
npm install
fi
else
echo "No lockfile detected; running npm install"
npm install
fi
working-directory: ${{ inputs['working-directory'] }}
- name: Check for test collection errors
id: check-collection
run: |
exec 3>&1 4>&2
exec 1> >(tee -a debug_target_collection.log) 2>&1
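# The exec lines above save the original stdout/stderr on fds 3 and 4, then tee all output
# into debug_target_collection.log; the originals are restored below once the discovery command has run.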
echo "Running Mocha collection check on target branch..."
HAS_COLLECTION_ERRORS="false"
NO_TESTS_FOUND="false"
ERROR_TYPE="none"
ERROR_DETAILS="none"
MOCHA_COMMAND="${{ inputs['mocha-command'] }}"
MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}"
if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then
MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json"
fi
COLLECTION_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS --dry-run"
echo "Executing: $COLLECTION_COMMAND"
set +e
eval "$COLLECTION_COMMAND" > collection_output.json 2> collection_output.txt
EXIT_CODE=$?
set -e
exec 1>&3 2>&4
if [ "$EXIT_CODE" -ne 0 ]; then
HAS_COLLECTION_ERRORS="true"
if grep -qi "Cannot find module" collection_output.txt; then
ERROR_TYPE="ModuleNotFoundError"
elif grep -qi "SyntaxError" collection_output.txt; then
ERROR_TYPE="SyntaxError"
elif grep -qi "TypeError" collection_output.txt; then
ERROR_TYPE="TypeError"
elif grep -qi "ReferenceError" collection_output.txt; then
ERROR_TYPE="ReferenceError"
else
ERROR_TYPE="ExecutionError"
fi
ERROR_DETAILS=$(head -40 collection_output.txt | tr '\n' ' ' | sed 's/"/\\"/g')
echo "::warning::Test discovery errors detected in target branch via Mocha ($ERROR_TYPE)"
echo "::warning::First details: ${ERROR_DETAILS:0:200}..."
else
node <<'NODE'
const fs = require('fs');
const path = require('path');
const outputPath = path.resolve(process.cwd(), 'collection_output.json');
let discoveredCount = 0;
try {
const raw = fs.readFileSync(outputPath, 'utf-8');
if (raw.trim()) {
const data = JSON.parse(raw);
if (data && data.stats && typeof data.stats.tests === 'number') {
discoveredCount = data.stats.tests;
} else if (Array.isArray(data.tests)) {
discoveredCount = data.tests.length;
}
}
} catch (error) {
console.log(`::warning::Unable to parse Mocha discovery JSON for target branch: ${error.message}`);
}
fs.writeFileSync(
process.env.GITHUB_OUTPUT,
`target_tests_discovered=${discoveredCount}\n`,
{ flag: 'a' }
);
NODE
TEST_COUNT=$(grep -o "target_tests_discovered=[0-9]*" $GITHUB_OUTPUT | tail -1 | cut -d'=' -f2)
if [ -z "$TEST_COUNT" ]; then
TEST_COUNT=0
fi
if [ "$TEST_COUNT" = "0" ]; then
NO_TESTS_FOUND="true"
ERROR_TYPE="NoTestsFound"
ERROR_DETAILS="Mocha --dry-run did not discover any tests on the target branch"
echo "::warning::No tests were found in the target branch"
else
echo "Found $TEST_COUNT test(s) in target branch"
fi
fi
echo "has_collection_errors=$HAS_COLLECTION_ERRORS" >> $GITHUB_OUTPUT
echo "no_tests_found=$NO_TESTS_FOUND" >> $GITHUB_OUTPUT
echo "error_type=$ERROR_TYPE" >> $GITHUB_OUTPUT
echo "error_details=$ERROR_DETAILS" >> $GITHUB_OUTPUT
if [[ "$HAS_COLLECTION_ERRORS" == "true" || "$NO_TESTS_FOUND" == "true" ]]; then
echo "has_errors=true" >> $GITHUB_OUTPUT
else
echo "has_errors=false" >> $GITHUB_OUTPUT
fi
echo "=== FULL COLLECTION OUTPUT ===" >> debug_target_collection.log
cat collection_output.txt >> debug_target_collection.log
working-directory: ${{ inputs['working-directory'] }}
- name: Run tests on target branch
if: steps.check-collection.outputs.has_collection_errors != 'true'
run: |
echo "Running Mocha tests on target branch..."
MOCHA_COMMAND="${{ inputs['mocha-command'] }}"
MOCHA_EXTRA_ARGS="${{ inputs['mocha-extra-args'] }}"
if ! echo "$MOCHA_EXTRA_ARGS" | grep -q "--reporter"; then
MOCHA_EXTRA_ARGS="$MOCHA_EXTRA_ARGS --reporter json"
fi
TEST_COMMAND="$MOCHA_COMMAND $MOCHA_EXTRA_ARGS"
echo "Executing: $TEST_COMMAND"
set +e
eval "$TEST_COMMAND" > >(tee target_results.json) 2> target_test_output.txt
EXIT_CODE=$?
set -e
if [ -s target_results.json ]; then
echo "✅ Test execution completed (exit code: $EXIT_CODE)"
else
echo "❌ Test execution did not produce target_results.json (exit code: $EXIT_CODE)"
fi
working-directory: ${{ inputs['working-directory'] }}
- name: Extract test results and create artifacts
id: extract-results
# Only run if there were no collection errors
if: steps.check-collection.outputs.has_collection_errors != 'true'
run: |
echo "Processing test results for target branch: ${{ inputs.target_branch_to_compare }}"
# Create debug file for detailed output
exec 3>&1 4>&2
exec 1> >(tee -a debug_target_extract_results.log) 2>&1
node <<'NODE'
const fs = require('fs');
const path = require('path');
const resultsPath = path.resolve(process.cwd(), 'target_results.json');
let targetTotal = 0;
let targetPassed = 0;
let targetPercentage = 0;
const passingTests = [];
const failingTests = [];
const skippedTests = [];
const xfailedTests = [];
const skippedTestsWithReasons = {};
const xfailedTestsWithReasons = {};
const allTests = [];
const warningsList = [];
const safeRead = (filePath) => {
try {
return fs.readFileSync(filePath, 'utf-8');
} catch (error) {
return null;
}
};
const rawResults = safeRead(resultsPath);
if (rawResults) {
try {
const data = JSON.parse(rawResults);
const stats = data?.stats ?? {};
const tests = Array.isArray(data?.tests) ? data.tests : [];
targetTotal = Number.isFinite(stats.tests) ? Number(stats.tests) : tests.length;
targetPassed = Number.isFinite(stats.passes)
? Number(stats.passes)
: Array.isArray(data?.passes)
? data.passes.length
: 0;
for (const test of tests) {
const suitePath = test.file ? path.relative(process.cwd(), test.file) || test.file : '';
const fullTitle = test.fullTitle || test.title || 'Unnamed test';
const identifier = suitePath ? `${suitePath}::${fullTitle}` : fullTitle;
const state = test.state || (test.pending ? 'pending' : undefined);
allTests.push(identifier);
switch (state) {
case 'passed':
passingTests.push(identifier);
break;
case 'failed':
failingTests.push(identifier);
break;
case 'pending':
skippedTests.push(identifier);
skippedTestsWithReasons[identifier] =
(test.err && test.err.message) || 'Marked as pending in Mocha output';
break;
default: {
const matchBy = (collection) =>
Array.isArray(collection)
? collection.find(
(item) =>
item &&
item.fullTitle === test.fullTitle &&
(item.file === test.file || (!item.file && !test.file)),
)
: undefined;
if (!state) {
if (matchBy(data?.failures)) {
failingTests.push(identifier);
break;
}
if (matchBy(data?.passes)) {
passingTests.push(identifier);
break;
}
const pendingMatch = matchBy(data?.pending);
if (pendingMatch) {
skippedTests.push(identifier);
skippedTestsWithReasons[identifier] =
(pendingMatch.err && pendingMatch.err.message) || 'Marked as pending in Mocha output';
break;
}
}
skippedTests.push(identifier);
skippedTestsWithReasons[identifier] = 'Test state unknown; treated as skipped';
break;
}
}
}
if (!targetPassed && passingTests.length) {
targetPassed = passingTests.length;
}
if (targetTotal > 0) {
targetPercentage = (targetPassed / targetTotal) * 100;
}
} catch (error) {
console.error(`Failed to parse Mocha JSON results: ${error.message}`);
}
}
const summary = {
total: targetTotal,
passed: targetPassed,
percentage: Number.isFinite(targetPercentage) ? targetPercentage.toFixed(2) : '0.00',
};
const output = {
summary,
passing_tests: passingTests,
failing_tests: failingTests,
skipped_tests: skippedTests,
xfailed_tests: xfailedTests,
all_tests: allTests,
skipped_tests_with_reasons: skippedTestsWithReasons,
xfailed_tests_with_reasons: xfailedTestsWithReasons,
warnings: warningsList,
};
fs.writeFileSync('target_test_data.json', JSON.stringify(output, null, 2));
const outputLines = [
`total=${targetTotal}`,
`passed=${targetPassed}`,
`percentage=${Number.isFinite(targetPercentage) ? targetPercentage.toFixed(2) : '0.00'}`,
`passing_count=${passingTests.length}`,
];
fs.appendFileSync(process.env.GITHUB_OUTPUT, `${outputLines.join('\n')}\n`);
NODE
# Restore stdout/stderr for GitHub Actions
exec 1>&3 2>&4
echo "Target branch test results processed: ${{ steps.extract-results.outputs.passed }}/${{ steps.extract-results.outputs.total }} tests passed (${{ steps.extract-results.outputs.percentage }}%)"
working-directory: ${{ inputs['working-directory'] }}
- name: Upload target branch artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: target_branch_data_${{ github.event.pull_request.number || github.run_id }}
path: |
${{ inputs['working-directory'] }}/target_test_data.json
${{ inputs['working-directory'] }}/target_test_output.txt
${{ inputs['working-directory'] }}/target_results.json
${{ inputs['working-directory'] }}/collection_output.json
${{ inputs['working-directory'] }}/collection_output.txt
${{ inputs['working-directory'] }}/debug_target_collection.log
${{ inputs['working-directory'] }}/debug_target_extract_results.log
retention-days: 3
if-no-files-found: ignore
# Add a step to set default outputs when collection errors are detected
- name: Set collection error outputs
id: set-error-outputs
if: steps.check-collection.outputs.has_collection_errors == 'true'
run: |
echo "::warning::Setting default outputs for target branch due to collection errors"
echo "total=0" >> $GITHUB_OUTPUT
echo "passed=0" >> $GITHUB_OUTPUT
echo "percentage=0.00" >> $GITHUB_OUTPUT
echo "passing_count=0" >> $GITHUB_OUTPUT
compare-results:
needs: [test-source-branch, test-target-branch]
uses: ./.github/workflows/regression-test.yml
with:
runs_on: ${{ inputs.runs_on }}
baseline_label: ${{ inputs.target_branch_to_compare }}
baseline_results_artifact: target_branch_data_${{ github.event.pull_request.number || github.run_id }}
baseline_results_filename: target_test_data.json
current_label: ${{ github.head_ref || github.ref_name || 'source branch' }}
current_results_artifact: pr_branch_data_${{ github.event.pull_request.number || github.run_id }}
current_results_filename: pr_test_data.json
baseline_passed: ${{ needs.test-target-branch.outputs.passed }}
baseline_total: ${{ needs.test-target-branch.outputs.total }}
baseline_percentage: ${{ needs.test-target-branch.outputs.percentage }}
current_passed: ${{ needs.test-source-branch.outputs.passed }}
current_total: ${{ needs.test-source-branch.outputs.total }}
current_percentage: ${{ needs.test-source-branch.outputs.percentage }}
baseline_collection_errors: ${{ needs.test-target-branch.outputs.collection_errors }}
baseline_no_tests_found: ${{ needs.test-target-branch.outputs.no_tests_found }}
current_collection_errors: ${{ needs.test-source-branch.outputs.collection_errors }}
current_no_tests_found: ${{ needs.test-source-branch.outputs.no_tests_found }}
artifact_name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests
perform-regression-analysis:
needs: [test-source-branch, test-target-branch]
uses: ./.github/workflows/meta-regression-analysis.yml
with:
item_type_singular: "test"
item_type_plural: "tests"
pr_number: ${{ github.event.pull_request.number }}
run_id: ${{ github.run_id }}
target_branch_artifact_name: target_branch_data_${{ github.event.pull_request.number || github.run_id }}
pr_branch_artifact_name: pr_branch_data_${{ github.event.pull_request.number || github.run_id }}
# Conditionally run notification job only if needed
prepare-notification:
name: Prepare Notification Data
needs:
[
test-source-branch,
test-target-branch,
compare-results,
perform-regression-analysis,
]
# Notify on collection errors, no tests found, compare result failure, or if regressions are detected
if: |
always() &&
(
needs.test-source-branch.outputs.collection_errors == 'true' ||
needs.test-source-branch.outputs.no_tests_found == 'true' ||
needs.compare-results.result == 'failure' ||
needs.perform-regression-analysis.outputs.has_regressions == 'true'
)
runs-on: ${{ fromJSON(inputs.runs_on) }}
outputs:
message_body: ${{ steps.construct_notification.outputs.message_body_out }}
ping_user_ids: ${{ steps.construct_notification.outputs.ping_user_ids_out }}
artifact_path: ${{ steps.construct_notification.outputs.artifact_path_out }}
should_notify: "true"
webhook_available_for_alert: ${{ steps.check_webhook_availability.outputs.webhook_available }}
steps:
- name: Check for Discord Webhook URL
id: check_webhook_availability
run: |
if [ -z "${{ secrets.DISCORD_WEBHOOK_URL }}" ]; then
echo "::notice::DISCORD_WEBHOOK_URL secret is not set. Discord notifications will likely be skipped by the alert workflow if it relies on this secret."
echo "webhook_available=false" >> $GITHUB_OUTPUT
else
echo "webhook_available=true" >> $GITHUB_OUTPUT
fi
- name: Download regression details (if any)
id: download_regressions
if: always()
uses: actions/download-artifact@v4
with:
name: regression_details_pr_${{ github.event.pull_request.number || github.run_id }}_tests
path: . # Download to current directory
continue-on-error: true
- name: Check downloaded regression file
if: always()
run: |
echo "Checking for regression details file..."
if [ -f "regression_details.txt" ]; then
echo "✅ Regression details file found"
echo "File size: $(wc -c < regression_details.txt) bytes"
echo "First few lines:"
head -5 regression_details.txt
else
echo "❌ Regression details file not found"
fi
if [ -f "comprehensive_regression_report.txt" ]; then
echo "✅ Comprehensive regression report found"
echo "File size: $(wc -c < comprehensive_regression_report.txt) bytes"
else
echo "❌ Comprehensive regression report not found"
fi
- name: Construct Discord Notification
id: construct_notification
env:
SOURCE_TEST_RESULT: ${{ needs.test-source-branch.result }}
TARGET_TEST_RESULT: ${{ needs.test-target-branch.result }}
COMPARE_RESULT: ${{ needs.compare-results.result }}
PR_COLLECTION_ERRORS: ${{ needs.test-source-branch.outputs.collection_errors }}
PR_NO_TESTS_FOUND: ${{ needs.test-source-branch.outputs.no_tests_found }}
PR_ERROR_TYPE: ${{ needs.test-source-branch.outputs.error_type }}
PR_ERROR_DETAILS_TRUNCATED: ${{ needs.test-source-branch.outputs.error_details }}
HAS_REGRESSIONS: ${{ needs.perform-regression-analysis.outputs.has_regressions }}
REGRESSION_COUNT: ${{ needs.perform-regression-analysis.outputs.regression_count }}
PR_TOTAL_TESTS: ${{ needs.test-source-branch.outputs.total }}
PR_PASSED_TESTS: ${{ needs.test-source-branch.outputs.passed }}
PR_PERCENTAGE: ${{ needs.test-source-branch.outputs.percentage }}
TARGET_TOTAL_TESTS: ${{ needs.test-target-branch.outputs.total }}
TARGET_PASSED_TESTS: ${{ needs.test-target-branch.outputs.passed }}
TARGET_PERCENTAGE: ${{ needs.test-target-branch.outputs.percentage }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PR_TITLE: ${{ github.event.pull_request.title }}
PR_URL: ${{ github.event.pull_request.html_url }}
TARGET_BRANCH_NAME: ${{ inputs.target_branch_to_compare }}
PR_BRANCH_NAME: ${{ github.head_ref }}
REPO_URL: ${{ github.server_url }}/${{ github.repository }}
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
GH_ASSIGNEES_JSON: ${{ toJson(github.event.pull_request.assignees) }}
USER_MAP_JSON: ${{ secrets.DISCORD_USER_MAP || '{}' }}
REGRESSION_FILE_PATH: "regression_details.txt"
DOWNLOAD_REGRESSIONS_OUTCOME: ${{ steps.download_regressions.outcome }}
INPUT_PING_LATEST_COMMITTER: ${{ inputs.ping_latest_committer }}
GH_TOKEN: ${{ github.token }} # required for the gh CLI call below
run: |
# Create debug file for detailed notification construction
exec 3>&1 4>&2
exec 1> >(tee -a debug_notification_construction.log) 2>&1
MESSAGE_LINES=() # Use an array to build message lines
PING_KEYS_OUTPUT="" # Will be comma-separated GitHub logins
ARTIFACT_PATH_OUTPUT=""
echo "Raw GH_ASSIGNEES_JSON value: [$GH_ASSIGNEES_JSON]"
echo "Raw USER_MAP_JSON value: [$USER_MAP_JSON]"
# 1. Determine Pings - Collect GitHub Logins to pass to alert-discord.yml
# Initialize PING_KEYS_OUTPUT
PING_KEYS_OUTPUT=""
# Add assignees to PING_KEYS_OUTPUT
if [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && command -v jq &> /dev/null; then
ASSIGNEE_LOGINS_ARRAY=($(echo "$GH_ASSIGNEES_JSON" | jq -r '.[].login // empty'))
echo "GH_ASSIGNEES_JSON received: $GH_ASSIGNEES_JSON"
echo "Extracted ASSIGNEE_LOGINS_ARRAY: (${ASSIGNEE_LOGINS_ARRAY[*]})"
echo "Count of assignees extracted: ${#ASSIGNEE_LOGINS_ARRAY[@]}"
MAPPED_ASSIGNEE_COUNT=0
TEMP_PING_KEYS=()
for assignee_login in "${ASSIGNEE_LOGINS_ARRAY[@]}"; do
if [ -z "$assignee_login" ]; then
echo "Skipping empty assignee login."
continue
fi
echo "Processing assignee for ping: '$assignee_login'"
# Check if this assignee_login exists as a key in USER_MAP_JSON
if echo "$USER_MAP_JSON" | jq -e --arg K "$assignee_login" '.[$K]' > /dev/null; then
echo "Assignee '$assignee_login' FOUND in USER_MAP_JSON."
TEMP_PING_KEYS+=("$assignee_login")
MAPPED_ASSIGNEE_COUNT=$((MAPPED_ASSIGNEE_COUNT + 1))
else
echo "Assignee '$assignee_login' NOT FOUND in USER_MAP_JSON."
fi
done
echo "Total assignees found in USER_MAP_JSON and added to pings: $MAPPED_ASSIGNEE_COUNT"
if [ ${#TEMP_PING_KEYS[@]} -gt 0 ]; then
PING_KEYS_OUTPUT=$(IFS=,; echo "${TEMP_PING_KEYS[*]}")
echo "Initial PING_KEYS_OUTPUT from assignees: [$PING_KEYS_OUTPUT]"
else
echo "No assignees found or GH_ASSIGNEES_JSON was empty, or no assignees were found in USER_MAP_JSON."
fi
elif [ -n "$USER_MAP_JSON" ] && [ "$USER_MAP_JSON" != "{}" ] && ! command -v jq &> /dev/null; then
echo "::warning::jq is not available. Cannot determine GitHub users (assignees) for pings."
else
echo "No user map JSON or jq not found. PING_KEYS_OUTPUT (from assignees) will be empty."
fi
# Add latest committer if INPUT_PING_LATEST_COMMITTER is true
if [[ "$INPUT_PING_LATEST_COMMITTER" == "true" ]]; then
echo "INPUT_PING_LATEST_COMMITTER is true. Attempting to fetch latest committer for PR #${PR_NUMBER}."
if command -v gh &> /dev/null && [ -n "$PR_NUMBER" ]; then
LATEST_COMMITTER_LOGIN_RAW=$(gh pr view "$PR_NUMBER" --json commits --jq '.commits[-1].author.login' 2>/dev/null || echo "")
if [ -n "$LATEST_COMMITTER_LOGIN_RAW" ] && [ "$LATEST_COMMITTER_LOGIN_RAW" != "null" ]; then
# Apply bot filter (e.g., names ending in [bot] or -bot)
LATEST_COMMITTER_LOGIN=$(echo "$LATEST_COMMITTER_LOGIN_RAW" | grep -v -E -i '(\[bot\]$|-bot$)' || echo "")
if [ -n "$LATEST_COMMITTER_LOGIN" ]; then
echo "Latest committer identified: $LATEST_COMMITTER_LOGIN"
# Check if this committer is already in PING_KEYS_OUTPUT
ALREADY_IN_LIST=0
if [ -n "$PING_KEYS_OUTPUT" ]; then # Only check if PING_KEYS_OUTPUT is not empty
IFS=',' read -ra PING_ARRAY <<< "$PING_KEYS_OUTPUT"
for key in "${PING_ARRAY[@]}"; do
if [[ "$key" == "$LATEST_COMMITTER_LOGIN" ]]; then
ALREADY_IN_LIST=1
break
fi
done
fi
if [[ "$ALREADY_IN_LIST" -eq 0 ]]; then
if [ -z "$PING_KEYS_OUTPUT" ]; then
PING_KEYS_OUTPUT="$LATEST_COMMITTER_LOGIN"
else
PING_KEYS_OUTPUT="$PING_KEYS_OUTPUT,$LATEST_COMMITTER_LOGIN"
fi
echo "Added latest committer '$LATEST_COMMITTER_LOGIN' to PING_KEYS_OUTPUT. New list: [$PING_KEYS_OUTPUT]"
else
echo "Latest committer '$LATEST_COMMITTER_LOGIN' is already in PING_KEYS_OUTPUT (likely an assignee)."
fi
else
echo "Latest committer login '$LATEST_COMMITTER_LOGIN_RAW' was filtered out (likely a bot or pattern match) or empty after filter."
fi
else
echo "No latest committer login found for PR #$PR_NUMBER from gh command, or login was null."
fi
else
if ! command -v gh &> /dev/null; then
echo "::warning::gh command not available. Cannot fetch latest committer."
fi
if [ -z "$PR_NUMBER" ]; then
echo "::warning::PR_NUMBER is not set (event might not be a pull_request). Cannot fetch latest committer."
fi
fi
fi
# Restore stdout/stderr for GitHub Actions to show final summary
exec 1>&3 2>&4
# Make this a standard echo for better visibility of the final list
echo "Final Ping Keys Output (GitHub Logins from test-js-mocha.yml): [$PING_KEYS_OUTPUT]"
echo "ping_user_ids_out=$PING_KEYS_OUTPUT" >> $GITHUB_OUTPUT
# Store branch names in variables with proper quoting
PR_BRANCH="${PR_BRANCH_NAME:-unknown}"
TARGET_BRANCH="${TARGET_BRANCH_NAME:-unknown}"
# 2. Construct Message Body
MESSAGE_LINES+=("**Mocha Comparison & Regression Analysis for PR [#${PR_NUMBER}: ${PR_TITLE}](${PR_URL})**")
MESSAGE_LINES+=("Branch: [\`${PR_BRANCH}\`](${REPO_URL}/tree/${PR_BRANCH}) against [\`${TARGET_BRANCH}\`](${REPO_URL}/tree/${TARGET_BRANCH})")
MESSAGE_LINES+=("---")
# Job Status Summary
MESSAGE_LINES+=("**Job Status:**")
SOURCE_TEST_STATUS="Success"
if [[ "$SOURCE_TEST_RESULT" == "failure" ]]; then SOURCE_TEST_STATUS="Failed"; elif [[ "$SOURCE_TEST_RESULT" == "skipped" ]]; then SOURCE_TEST_STATUS="Skipped"; fi
MESSAGE_LINES+=("- PR Branch Tests (\`${PR_BRANCH}\`): $SOURCE_TEST_STATUS")
TARGET_TEST_STATUS="Success"
if [[ "$TARGET_TEST_RESULT" == "failure" ]]; then TARGET_TEST_STATUS="Failed"; elif [[ "$TARGET_TEST_RESULT" == "skipped" ]]; then TARGET_TEST_STATUS="Skipped"; fi
MESSAGE_LINES+=("- Target Branch Tests (\`${TARGET_BRANCH}\`): $TARGET_TEST_STATUS")
COMPARE_STATUS="Success"
if [[ "$COMPARE_RESULT" == "failure" ]]; then COMPARE_STATUS="Failed"; elif [[ "$COMPARE_RESULT" == "skipped" ]]; then COMPARE_STATUS="Skipped"; fi
MESSAGE_LINES+=("- Comparison & Regression: $COMPARE_STATUS")
MESSAGE_LINES+=("---")
# Test Discovery Issues in PR Branch
if [[ "$PR_COLLECTION_ERRORS" == "true" ]]; then
MESSAGE_LINES+=("**:red_circle: ERROR: Test Discovery Failed in PR Branch (\`${PR_BRANCH}\`)**")
MESSAGE_LINES+=(" - Type: \`${PR_ERROR_TYPE}\`")
MESSAGE_LINES+=(" - This usually indicates import errors or syntax issues preventing tests from being collected.")
MESSAGE_LINES+=(" - See attached file for detailed error information.")
elif [[ "$PR_NO_TESTS_FOUND" == "true" ]]; then
MESSAGE_LINES+=("**:warning: WARNING: No Tests Found in PR Branch (\`${PR_BRANCH}\`)**")
MESSAGE_LINES+=(" - Mocha did not discover any test files matching its patterns.")
MESSAGE_LINES+=(" - Ensure your test files are correctly named (e.g., \`test_*.py\` or \`*_test.py\`) and located.")
fi
# Regression Analysis Summary
if [[ "$HAS_REGRESSIONS" == "true" ]]; then
MESSAGE_LINES+=("**:red_circle: REGRESSIONS DETECTED**")
# Check if we have comprehensive regression file with categories
if [ -f "comprehensive_regression_report.txt" ]; then
# Extract counts from comprehensive report
PASS_FAIL_COUNT=$(grep -o "PASS-TO-FAIL REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
PASS_SKIP_COUNT=$(grep -o "PASS-TO-SKIP REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
PASS_GONE_COUNT=$(grep -o "PASS-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
FAIL_GONE_COUNT=$(grep -o "FAIL-TO-GONE REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
DISCOVERY_COUNT=$(grep -o "DISCOVERY REGRESSIONS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
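# The extraction above and below assumes the comprehensive report labels each section as
# "<CATEGORY> (<count>)" and lists entries as "  N. <test id>"; adjust the grep/sed patterns
# if that report format changes.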
# Add category summaries (≤5 show paths, >5 show count + refer to file)
if [[ "$PASS_FAIL_COUNT" -gt 0 ]]; then
if [[ "$PASS_FAIL_COUNT" -le 5 ]]; then
MESSAGE_LINES+=("**Pass→Fail ($PASS_FAIL_COUNT):**")
readarray -t test_paths < <(grep -A 100 "PASS-TO-FAIL REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_FAIL_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=("**Pass→Fail:** $PASS_FAIL_COUNT tests (see attached file)")
fi
fi
if [[ "$PASS_SKIP_COUNT" -gt 0 ]]; then
if [[ "$PASS_SKIP_COUNT" -le 5 ]]; then
MESSAGE_LINES+=("**Pass→Skip ($PASS_SKIP_COUNT):**")
readarray -t test_paths < <(grep -A 100 "PASS-TO-SKIP REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_SKIP_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=("**Pass→Skip:** $PASS_SKIP_COUNT tests (see attached file)")
fi
fi
if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then
if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then
MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**")
readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)")
fi
fi
if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then
if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then
MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**")
readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)")
fi
fi
if [[ "$PASS_GONE_COUNT" -gt 0 ]]; then
if [[ "$PASS_GONE_COUNT" -le 5 ]]; then
MESSAGE_LINES+=("**Pass→Gone ($PASS_GONE_COUNT):**")
readarray -t test_paths < <(grep -A 100 "PASS-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$PASS_GONE_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=("**Pass→Gone:** $PASS_GONE_COUNT tests (see attached file)")
fi
fi
if [[ "$FAIL_GONE_COUNT" -gt 0 ]]; then
if [[ "$FAIL_GONE_COUNT" -le 5 ]]; then
MESSAGE_LINES+=("**Fail→Gone ($FAIL_GONE_COUNT):**")
readarray -t test_paths < <(grep -A 100 "FAIL-TO-GONE REGRESSIONS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_GONE_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=("**Fail→Gone:** $FAIL_GONE_COUNT tests (see attached file)")
fi
fi
if [[ "$DISCOVERY_COUNT" -gt 0 ]]; then
if [[ "$DISCOVERY_COUNT" -le 5 ]]; then
MESSAGE_LINES+=("**Discovery Warnings ($DISCOVERY_COUNT):**")
MESSAGE_LINES+=("• $DISCOVERY_COUNT new warnings (see attached file)")
else
MESSAGE_LINES+=("**Discovery Warnings:** $DISCOVERY_COUNT warnings (see attached file)")
fi
fi
if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then
if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then
MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**")
readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)")
fi
fi
else
# Fallback to simple regression count
MESSAGE_LINES+=(" - **${REGRESSION_COUNT} test(s)** that were passing in \`${TARGET_BRANCH}\` are now **failing** in \`${PR_BRANCH}\`.")
fi
elif [[ "$COMPARE_RESULT" == "failure" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then
# This case handles general comparison failures NOT due to specific regressions
MESSAGE_LINES+=("**:warning: TEST RESULTS DECLINED**")
MESSAGE_LINES+=(" - The PR branch shows a decrease in test success compared to the target branch, but no specific regressions were identified by the \`meta-regression-analysis\` job.")
MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**")
MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**")
elif [[ "$COMPARE_RESULT" == "success" ]] && [[ "$HAS_REGRESSIONS" != "true" ]]; then
MESSAGE_LINES+=("**:white_check_mark: NO REGRESSIONS DETECTED**")
MESSAGE_LINES+=(" - PR Branch (\`${PR_BRANCH}\`): **${PR_PASSED_TESTS}/${PR_TOTAL_TESTS} passed (${PR_PERCENTAGE}%)**")
MESSAGE_LINES+=(" - Target Branch (\`${TARGET_BRANCH}\`): **${TARGET_PASSED_TESTS}/${TARGET_TOTAL_TESTS} passed (${TARGET_PERCENTAGE}%)**")
fi
if [[ "$HAS_REGRESSIONS" != "true" ]] && [ -f "comprehensive_regression_report.txt" ]; then
FAIL_SKIP_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-SKIP IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
FAIL_PASS_IMPROVEMENTS_COUNT=$(grep -o "FAIL-TO-PASS IMPROVEMENTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
NEW_TESTS_COUNT=$(grep -o "NEW TESTS (\([0-9]*\)" comprehensive_regression_report.txt | grep -o "[0-9]*" || echo "0")
if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 || "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 || "$NEW_TESTS_COUNT" -gt 0 ]]; then
MESSAGE_LINES+=("**:sparkles: Improvements & Additions**")
if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -gt 0 ]]; then
if [[ "$FAIL_SKIP_IMPROVEMENTS_COUNT" -le 5 ]]; then
MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements ($FAIL_SKIP_IMPROVEMENTS_COUNT):**")
readarray -t test_paths < <(grep -A 100 "FAIL-TO-SKIP IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_SKIP_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=(":white_check_mark: **Fail→Skip Improvements:** $FAIL_SKIP_IMPROVEMENTS_COUNT tests (see attached file)")
fi
fi
if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -gt 0 ]]; then
if [[ "$FAIL_PASS_IMPROVEMENTS_COUNT" -le 5 ]]; then
MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements ($FAIL_PASS_IMPROVEMENTS_COUNT):**")
readarray -t test_paths < <(grep -A 100 "FAIL-TO-PASS IMPROVEMENTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$FAIL_PASS_IMPROVEMENTS_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=(":white_check_mark: **Fail→Pass Improvements:** $FAIL_PASS_IMPROVEMENTS_COUNT tests (see attached file)")
fi
fi
if [[ "$NEW_TESTS_COUNT" -gt 0 ]]; then
if [[ "$NEW_TESTS_COUNT" -le 5 ]]; then
MESSAGE_LINES+=(":sparkles: **New Tests ($NEW_TESTS_COUNT):**")
readarray -t test_paths < <(grep -A 100 "NEW TESTS" comprehensive_regression_report.txt | grep "^ [0-9]\+\." | head -$NEW_TESTS_COUNT | sed 's/^ [0-9]\+\. //')
for test_path in "${test_paths[@]}"; do
MESSAGE_LINES+=("• \`$test_path\`")
done
else
MESSAGE_LINES+=(":sparkles: **New Tests:** $NEW_TESTS_COUNT tests (see attached file)")
fi
fi
fi
fi
MESSAGE_LINES+=("---")
MESSAGE_LINES+=("[View Workflow Run](${ACTION_RUN_URL})")
# Set artifact path - always prefer comprehensive report if it exists
if [ -f "comprehensive_regression_report.txt" ]; then
ARTIFACT_PATH_OUTPUT="comprehensive_regression_report.txt"
elif [ -f "$REGRESSION_FILE_PATH" ] && [[ "$DOWNLOAD_REGRESSIONS_OUTCOME" == "success" ]]; then
ARTIFACT_PATH_OUTPUT="$REGRESSION_FILE_PATH"
else
ARTIFACT_PATH_OUTPUT=""
fi
# printf joins the message lines with real newlines; the trailing newline it adds is
# stripped automatically by the command substitution.
FINAL_MESSAGE_BODY=$(printf "%s\\n" "${MESSAGE_LINES[@]}")
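# Multiline values must be written to $GITHUB_OUTPUT with the delimiter (heredoc-style) syntax, as below.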
echo "Final message body prepared in test-js-mocha.yml"
echo "message_body_out<<EOF" >> $GITHUB_OUTPUT
echo "$FINAL_MESSAGE_BODY" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
echo "artifact_path_out=$ARTIFACT_PATH_OUTPUT" >> $GITHUB_OUTPUT
notify-discord:
name: Send Discord Notification
needs: [prepare-notification]
if: |
always() &&
needs.prepare-notification.outputs.should_notify == 'true' &&
needs.prepare-notification.outputs.webhook_available_for_alert == 'true'
uses: ./.github/workflows/alert-discord.yml
with:
message_body: ${{ needs.prepare-notification.outputs.message_body }}
ping_user_ids: ${{ needs.prepare-notification.outputs.ping_user_ids }}
artifact_paths: ${{ needs.prepare-notification.outputs.artifact_path }}
should_notify: ${{ needs.prepare-notification.outputs.should_notify }}
runs_on: ${{ inputs.runs_on }}
secrets:
DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }}
DISCORD_USER_MAP: ${{ secrets.DISCORD_USER_MAP }}