Merged
66 changes: 32 additions & 34 deletions .github/workflows/pr-validation.yml
@@ -23,30 +23,31 @@ jobs:
PR_BODY="${{ github.event.pull_request.body }}"

# Check if PR title or body contains issue reference
# Accepts: TRCLI-### (JIRA), GIT-### (GitHub), #123 (GitHub), issues/123
if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+"; then
echo "issue_found=true" >> $GITHUB_OUTPUT
if echo "$PR_TITLE $PR_BODY" | grep -qE "(TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+)"; then
echo "issue_found=true" >> "$GITHUB_OUTPUT"

# Extract the issue key/number
if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+"; then
ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "TRCLI-[0-9]+" | head -1)
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
echo "issue_type=JIRA" >> $GITHUB_OUTPUT
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
echo "issue_type=JIRA" >> "$GITHUB_OUTPUT"

elif echo "$PR_TITLE $PR_BODY" | grep -qE "GIT-[0-9]+"; then
ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "GIT-[0-9]+" | head -1)
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
echo "issue_type=GitHub" >> $GITHUB_OUTPUT
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
echo "issue_type=GitHub" >> "$GITHUB_OUTPUT"

elif echo "$PR_TITLE $PR_BODY" | grep -qE "#[0-9]+"; then
ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "#[0-9]+" | head -1)
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
echo "issue_type=GitHub" >> $GITHUB_OUTPUT
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
echo "issue_type=GitHub" >> "$GITHUB_OUTPUT"

elif echo "$PR_BODY" | grep -qE "issues/[0-9]+"; then
ISSUE_KEY=$(echo "$PR_BODY" | grep -oE "issues/[0-9]+" | head -1)
echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT
echo "issue_type=GitHub" >> $GITHUB_OUTPUT
echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT"
echo "issue_type=GitHub" >> "$GITHUB_OUTPUT"
fi
else
echo "issue_found=false" >> $GITHUB_OUTPUT
echo "issue_found=false" >> "$GITHUB_OUTPUT"
fi

- name: Comment on PR if issue reference missing
@@ -61,42 +62,38 @@ jobs:
repo: context.repo.repo,
body: `## ⚠️ Missing Issue Reference

This PR does not reference an issue. Please include a reference in either:
This PR does not reference an issue. Please include one.

**JIRA tickets:**
- PR title: "feat(api): TRCLI-123 Add new endpoint"
- PR description: "Resolves TRCLI-123"
**JIRA example:**
- TRCLI-123

**GitHub issues:**
- PR title: "feat(api): GIT-123 Add new endpoint"
- PR description: "Resolves GIT-123" or "Fixes #123"
- Or link to the GitHub issue

This helps with tracking and project management. Thank you!`
**GitHub examples:**
- GIT-123
- Fixes #123
- issues/123`
})

- name: Check PR Description Completeness
id: check_description
run: |
PR_BODY="${{ github.event.pull_request.body }}"

# Check for required sections
if echo "$PR_BODY" | grep -q "Issue being resolved"; then
echo "has_issue=true" >> $GITHUB_OUTPUT
echo "has_issue=true" >> "$GITHUB_OUTPUT"
else
echo "has_issue=false" >> $GITHUB_OUTPUT
echo "has_issue=false" >> "$GITHUB_OUTPUT"
fi

if echo "$PR_BODY" | grep -q "Solution description"; then
echo "has_solution=true" >> $GITHUB_OUTPUT
echo "has_solution=true" >> "$GITHUB_OUTPUT"
else
echo "has_solution=false" >> $GITHUB_OUTPUT
echo "has_solution=false" >> "$GITHUB_OUTPUT"
fi

if echo "$PR_BODY" | grep -q "Steps to test"; then
echo "has_test_steps=true" >> $GITHUB_OUTPUT
echo "has_test_steps=true" >> "$GITHUB_OUTPUT"
else
echo "has_test_steps=false" >> $GITHUB_OUTPUT
echo "has_test_steps=false" >> "$GITHUB_OUTPUT"
fi

- name: Generate PR Validation Summary
@@ -107,6 +104,7 @@ jobs:
const issueFound = '${{ steps.check_issue.outputs.issue_found }}' === 'true';
const issueKey = '${{ steps.check_issue.outputs.issue_key }}';
const issueType = '${{ steps.check_issue.outputs.issue_type }}';

const hasIssue = '${{ steps.check_description.outputs.has_issue }}' === 'true';
const hasSolution = '${{ steps.check_description.outputs.has_solution }}' === 'true';
const hasTestSteps = '${{ steps.check_description.outputs.has_test_steps }}' === 'true';
@@ -124,9 +122,9 @@ jobs:
| Solution Description | ${hasSolution ? '✅ Present' : '⚠️ Missing'} |
| Test Steps | ${hasTestSteps ? '✅ Present' : '⚠️ Missing'} |

${issueFound && hasSolution && hasTestSteps ? '✅ All checks passed!' : '⚠️ Some optional sections are missing. Consider adding them for better review context.'}
${issueFound && hasSolution && hasTestSteps
? '✅ All checks passed!'
: '⚠️ Some optional sections are missing.'}
`;

await core.summary
.addRaw(summary)
.write();
await core.summary.addRaw(summary).write();
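
For reference, the issue-reference detection above is a single extended-regex alternation fed to grep -E. The following is an illustration only, not part of the workflow: it exercises the same pattern with Python's re module against a few hypothetical PR titles.

import re

# Illustration only (assumption: plain Python 3; the workflow itself uses grep -E):
# the same alternation applied to made-up sample titles.
ISSUE_PATTERN = re.compile(r"TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+")

for title in [
    "feat(api): TRCLI-123 Add new endpoint",   # JIRA reference
    "fix: retry failed uploads (Fixes #456)",  # GitHub shorthand reference
    "chore: bump dependencies",                # no reference -> the warning comment is posted
]:
    match = ISSUE_PATTERN.search(title)
    print(title, "->", match.group(0) if match else "no issue reference")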
236 changes: 236 additions & 0 deletions tests/test_api_request_handler.py
@@ -827,6 +827,242 @@ def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, request
resources_added, error = api_request_handler_verify.delete_run(run_id)
assert error == "", "There should be no error in verification."

@pytest.mark.api_handler
def test_update_run_with_include_all_false_standalone(self, api_request_handler: ApiRequestHandler, requests_mock):
"""Test update_run for standalone run with include_all=false"""
run_id = 100
run_name = "Updated Test Run"

# Mock get_run response - standalone run (no plan_id), include_all=false
get_run_response = {
"id": run_id,
"name": "Original Run",
"description": "Original description",
"refs": "REF-1",
"include_all": False,
"plan_id": None,
"config_ids": [],
}

# Mock get_tests response - existing cases in run
get_tests_response = {
"offset": 0,
"limit": 250,
"size": 2,
"_links": {"next": None, "prev": None},
"tests": [{"id": 1, "case_id": 1, "status_id": 1}, {"id": 2, "case_id": 2, "status_id": 1}],
}

# Mock update_run response
update_run_response = {"id": run_id, "name": run_name}

requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response)
requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response)

# Execute update_run
run_data, error = api_request_handler.update_run(run_id, run_name)

# Assertions
assert error == "", "No error should occur"
assert run_data["id"] == run_id, "Run ID should match"

# Verify the payload sent to update_run
request_history = requests_mock.request_history
update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0]
payload = update_request.json()

assert payload["include_all"] == False, "include_all should be False"
assert "case_ids" in payload, "case_ids should be present"
# Should contain union of existing (1, 2) and report cases
assert set(payload["case_ids"]) >= {1, 2}, "Should include existing case IDs"

@pytest.mark.api_handler
def test_update_run_with_include_all_false_plan_with_config(
self, api_request_handler: ApiRequestHandler, requests_mock
):
"""Test update_run for run in plan with config and include_all=false (the bug scenario)"""
run_id = 200
run_name = "Updated Test Run in Plan"

# Mock get_run response - run in plan with config, include_all=false
get_run_response = {
"id": run_id,
"name": "Original Run",
"description": "Original description",
"refs": "REF-1",
"include_all": False,
"plan_id": 10,
"config_ids": [5, 6], # Has configs - will use update_run_in_plan_entry
}

# Mock get_tests response - existing cases
get_tests_response = {
"offset": 0,
"limit": 250,
"size": 3,
"_links": {"next": None, "prev": None},
"tests": [
{"id": 1, "case_id": 188, "status_id": 1},
{"id": 2, "case_id": 180, "status_id": 1},
{"id": 3, "case_id": 191, "status_id": 1},
],
}

# Mock update_run_in_plan_entry response
update_run_response = {"id": run_id, "name": run_name}

requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response)
requests_mock.post(create_url(f"update_run_in_plan_entry/{run_id}"), json=update_run_response)

# Execute update_run
run_data, error = api_request_handler.update_run(run_id, run_name)

# Assertions
assert error == "", "No error should occur"
assert run_data["id"] == run_id, "Run ID should match"

# Verify the payload sent to update_run_in_plan_entry
request_history = requests_mock.request_history
update_request = [r for r in request_history if "update_run_in_plan_entry" in r.url][0]
payload = update_request.json()

# THIS IS THE CRITICAL FIX - must include include_all=False
assert payload["include_all"] == False, "include_all must be False (fixes the bug)"
assert "case_ids" in payload, "case_ids should be present"
# Should contain union of existing (188, 180, 191) and report cases
assert set(payload["case_ids"]) >= {188, 180, 191}, "Should preserve existing case IDs"

@pytest.mark.api_handler
def test_update_run_with_include_all_true_preserves_setting(
self, api_request_handler: ApiRequestHandler, requests_mock
):
"""Test update_run preserves include_all=true and doesn't send case_ids"""
run_id = 300
run_name = "Updated Run with Include All"

# Mock get_run response - include_all=true
get_run_response = {
"id": run_id,
"name": "Original Run",
"description": "Original description",
"refs": "REF-1",
"include_all": True, # Run includes all cases
"plan_id": None,
"config_ids": [],
}

# Mock update_run response
update_run_response = {"id": run_id, "name": run_name, "include_all": True}

requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response)

# Execute update_run
run_data, error = api_request_handler.update_run(run_id, run_name)

# Assertions
assert error == "", "No error should occur"
assert run_data["include_all"] == True, "include_all should be preserved"

# Verify the payload sent to update_run
request_history = requests_mock.request_history
update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0]
payload = update_request.json()

assert payload["include_all"] == True, "include_all should be True"
assert "case_ids" not in payload, "case_ids should NOT be present when include_all=True"

@pytest.mark.api_handler
def test_update_run_handles_get_tests_error(self, api_request_handler: ApiRequestHandler, requests_mock):
"""Test update_run handles errors from get_tests gracefully"""
run_id = 400
run_name = "Test Run"

# Mock get_run response - include_all=false
get_run_response = {
"id": run_id,
"name": "Original Run",
"description": "Original description",
"refs": "REF-1",
"include_all": False,
"plan_id": None,
"config_ids": [],
}

# Mock get_tests to return error (403 Forbidden, for example)
requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
requests_mock.get(create_url(f"get_tests/{run_id}"), status_code=403, json={"error": "Access denied"})

# Execute update_run - should fail gracefully
run_data, error = api_request_handler.update_run(run_id, run_name)

# Assertions
assert run_data is None, "run_data should be None on error"
assert error is not None, "Error message should be present"
assert "Failed to get tests in run" in error, "Error should indicate get_tests failure"

@pytest.mark.api_handler
def test_update_run_with_include_all_false_plan_without_config(
self, api_request_handler: ApiRequestHandler, requests_mock
):
"""Test update_run for run in plan without config uses update_plan_entry"""
run_id = 500
run_name = "Updated Test Run in Plan No Config"
plan_id = 20
entry_id = "abc-123"

# Mock get_run response - run in plan without config
get_run_response = {
"id": run_id,
"name": "Original Run",
"description": "Original description",
"refs": "REF-1",
"include_all": False,
"plan_id": plan_id,
"config_ids": [], # No configs - will use update_plan_entry
}

# Mock get_tests response
get_tests_response = {
"offset": 0,
"limit": 250,
"size": 1,
"_links": {"next": None, "prev": None},
"tests": [{"id": 1, "case_id": 50, "status_id": 1}],
}

# Mock get_plan response
get_plan_response = {
"id": plan_id,
"entries": [{"id": entry_id, "runs": [{"id": run_id, "entry_id": entry_id}]}],
}

# Mock update_plan_entry response
update_plan_response = {"id": run_id, "name": run_name}

requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response)
requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response)
requests_mock.get(create_url(f"get_plan/{plan_id}"), json=get_plan_response)
requests_mock.post(create_url(f"update_plan_entry/{plan_id}/{entry_id}"), json=update_plan_response)

# Execute update_run
run_data, error = api_request_handler.update_run(run_id, run_name)

# Assertions
assert error == "", "No error should occur"
assert run_data["id"] == run_id, "Run ID should match"

# Verify update_plan_entry was called with correct payload
request_history = requests_mock.request_history
update_request = [r for r in request_history if f"update_plan_entry/{plan_id}/{entry_id}" in r.url][0]
payload = update_request.json()

assert payload["include_all"] == False, "include_all should be False"
assert "case_ids" in payload, "case_ids should be present"
assert 50 in payload["case_ids"], "Should include existing case ID"

@pytest.mark.api_handler
def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path):
"""Test that 413 errors (file too large) are properly reported."""
21 changes: 16 additions & 5 deletions trcli/api/api_request_handler.py
@@ -539,11 +539,22 @@ def update_run(
else:
add_run_data["refs"] = existing_refs # Keep existing refs if none provided

run_tests, error_message = self.__get_all_tests_in_run(run_id)
run_case_ids = [test["case_id"] for test in run_tests]
report_case_ids = add_run_data["case_ids"]
joint_case_ids = list(set(report_case_ids + run_case_ids))
add_run_data["case_ids"] = joint_case_ids
existing_include_all = run_response.response_text.get("include_all", False)
add_run_data["include_all"] = existing_include_all

if not existing_include_all:
# Only manage explicit case_ids when include_all=False
run_tests, error_message = self.__get_all_tests_in_run(run_id)
if error_message:
return None, f"Failed to get tests in run: {error_message}"
run_case_ids = [test["case_id"] for test in run_tests]
report_case_ids = add_run_data["case_ids"]
joint_case_ids = list(set(report_case_ids + run_case_ids))
add_run_data["case_ids"] = joint_case_ids
else:
# include_all=True: TestRail includes all suite cases automatically
# Do NOT send case_ids array (TestRail ignores it anyway)
add_run_data.pop("case_ids", None)

plan_id = run_response.response_text["plan_id"]
config_ids = run_response.response_text["config_ids"]
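
To make the include_all branch above concrete, here is a standalone sketch (a hypothetical helper with made-up case IDs, not the trcli code) of the payload shapes it produces in the two cases.

# Standalone sketch (hypothetical helper, not part of trcli): the payload
# shapes produced by the include_all branch above, with made-up case IDs.
def sketch_update_payload(existing_include_all, existing_case_ids, report_case_ids, name):
    payload = {"name": name, "include_all": existing_include_all}
    if not existing_include_all:
        # Preserve cases already in the run and add the ones from the report.
        payload["case_ids"] = sorted(set(existing_case_ids) | set(report_case_ids))
    # When include_all is True, no case_ids key is sent; TestRail includes
    # every case from the suite on its own.
    return payload

print(sketch_update_payload(False, [188, 180, 191], [188, 205], "Run in plan"))
# {'name': 'Run in plan', 'include_all': False, 'case_ids': [180, 188, 191, 205]}
print(sketch_update_payload(True, [], [205], "Run with include_all"))
# {'name': 'Run with include_all', 'include_all': True}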