diff --git a/CHANGELOG.MD b/CHANGELOG.MD
index b8306f55..0749997a 100644
--- a/CHANGELOG.MD
+++ b/CHANGELOG.MD
@@ -6,6 +6,14 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb
 - **MINOR**: New features that are backward-compatible.
 - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility.
 
+## [1.12.2]
+
+_released 10-16-2025
+
+### Added
+  - Allow adding references via the parse_junit command using --test-run-ref
+  - Allow parse_junit to update existing test cases' reference fields from JUnit testrail_case_field properties
+
 ## [1.12.1]
 
 _released 09-30-2025
diff --git a/README.md b/README.md
index d0e29365..f1928226 100644
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ trcli
 ```
 You should get something like this:
 ```
-TestRail CLI v1.12.1
+TestRail CLI v1.12.2
 Copyright 2025 Gurock Software GmbH - www.gurock.com
 Supported and loaded modules:
     - parse_junit: JUnit XML Files (& Similar)
@@ -87,6 +87,7 @@ Commands:
   parse_junit    Parse JUnit report and upload results to TestRail
   parse_openapi  Parse OpenAPI spec and create cases in TestRail
   parse_robot    Parse Robot Framework report and upload results to TestRail
+  references     Manage references in TestRail
 ```
 
 Uploading automated test results
@@ -136,8 +137,16 @@ Options:
   --allow-ms                  Allows using milliseconds for elapsed times.
   --special-parser            Optional special parser option for specialized JUnit reports.
-  -a, --assign                Comma-separated list of user emails to assign failed test
-                              results to.
+  -a, --assign                Comma-separated list of user emails to assign failed
+                              test results to.
+  --test-run-ref              Comma-separated list of reference IDs to append to the
+                              test run (up to 250 characters total).
+  --json-output               Output reference operation results in JSON format.
+  --update-existing-cases     Update existing TestRail cases with values from
+                              JUnit properties (default: no).
+  --update-strategy           Strategy for combining incoming values with
+                              existing case field values, whether to append or
+                              replace (default: append).
   --help                      Show this message and exit.
 ```
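For orientation, the new flags documented above might be combined into a single invocation roughly as in the sketch below. This is illustrative only: host, project, report path, run title, and reference IDs are placeholders, and credentials are omitted.

```python
import subprocess

# Hypothetical end-to-end invocation of the new 1.12.2 parse_junit flags.
# All values below are placeholders, not taken from the source tree.
subprocess.run(
    [
        "trcli", "-y",
        "-h", "https://example.testrail.io",
        "--project", "Example Project",
        "parse_junit",
        "-f", "results.xml",
        "--title", "Nightly regression",
        "--test-run-ref", "JIRA-101,JIRA-102",  # appended to the run (up to 250 chars total)
        "--update-existing-cases", "yes",       # pull refs from testrail_case_field properties
        "--update-strategy", "append",          # or "replace"
        "--json-output",                        # machine-readable reference summary
    ],
    check=True,
)
```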
diff --git a/tests/test_api_request_handler_references.py b/tests/test_api_request_handler_references.py
index 07ec075c..17dc4a83 100644
--- a/tests/test_api_request_handler_references.py
+++ b/tests/test_api_request_handler_references.py
@@ -174,8 +174,8 @@ def test_add_case_references_character_limit_exceeded(self, references_handler):
         with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response):
-            # Try to add more refs that would exceed 2000 chars
-            long_refs = ["REQ-" + "X" * 500 for _ in range(5)]
+            # Try to add more refs that would exceed 2000 chars (using unique refs to account for deduplication)
+            long_refs = [f"REQ-{i}-" + "X" * 500 for i in range(5)]
 
             success, error = references_handler.add_case_references(
                 case_id=1,
@@ -185,6 +185,43 @@ def test_add_case_references_character_limit_exceeded(self, references_handler):
         assert success is False
         assert "exceeds 2000 character limit" in error
 
+    def test_add_case_references_deduplication(self, references_handler):
+        """Test that duplicate references in input are deduplicated"""
+        # Mock get_case response with existing refs
+        mock_get_case_response = APIClientResult(
+            status_code=200,
+            response_text={
+                "id": 1,
+                "title": "Test Case 1",
+                "refs": "REQ-1"
+            },
+            error_message=None
+        )
+
+        # Mock update_case response
+        mock_update_response = APIClientResult(
+            status_code=200,
+            response_text={"id": 1, "refs": "REQ-1,REQ-2,REQ-3"},
+            error_message=None
+        )
+
+        with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response), \
+             patch.object(references_handler.client, 'send_post', return_value=mock_update_response):
+
+            success, error = references_handler.add_case_references(
+                case_id=1,
+                references=["REQ-2", "REQ-2", "REQ-3", "REQ-2"]  # Duplicates should be removed
+            )
+
+            assert success is True
+            assert error == ""
+
+            # Verify the API call has deduplicated references
+            references_handler.client.send_post.assert_called_once_with(
+                "update_case/1",
+                {'refs': 'REQ-1,REQ-2,REQ-3'}  # Duplicates removed, order preserved
+            )
+
     def test_update_case_references_success(self, references_handler):
         """Test successful update of references on a test case"""
         # Mock update_case response
@@ -212,8 +249,8 @@ def test_update_case_references_character_limit_exceeded(self, references_handler):
         """Test character limit validation for update"""
-        # Try to update with refs that exceed 2000 chars
-        long_refs = ["REQ-" + "X" * 500 for _ in range(5)]
+        # Try to update with refs that exceed 2000 chars (using unique refs to account for deduplication)
+        long_refs = [f"REQ-{i}-" + "X" * 500 for i in range(5)]
 
         success, error = references_handler.update_case_references(
             case_id=1,
@@ -223,6 +260,31 @@ def test_update_case_references_character_limit_exceeded(self, references_handler):
         assert success is False
         assert "exceeds 2000 character limit" in error
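The deduplication these tests pin down is order-preserving and happens before any `update_case` call. As a minimal standalone sketch of that logic (the handler inlines it rather than exposing a helper; `dedupe_refs` is a hypothetical name):

```python
def dedupe_refs(references):
    """Drop duplicate and blank reference IDs while keeping first-seen order."""
    seen = set()
    unique_refs = []
    for ref in references:
        ref_clean = ref.strip()
        if ref_clean and ref_clean not in seen:
            seen.add(ref_clean)
            unique_refs.append(ref_clean)
    return unique_refs


# Mirrors the expectation asserted in the tests above.
assert dedupe_refs(["REQ-2", "REQ-2", "REQ-3", "REQ-2"]) == ["REQ-2", "REQ-3"]
```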
 
+    def test_update_case_references_deduplication(self, references_handler):
+        """Test that duplicate references in input are deduplicated"""
+        # Mock update_case response
+        mock_update_response = APIClientResult(
+            status_code=200,
+            response_text={"id": 1, "refs": "REQ-1,REQ-2"},
+            error_message=None
+        )
+
+        with patch.object(references_handler.client, 'send_post', return_value=mock_update_response):
+
+            success, error = references_handler.update_case_references(
+                case_id=1,
+                references=["REQ-1", "REQ-1", "REQ-2", "REQ-1"]  # Duplicates should be removed
+            )
+
+            assert success is True
+            assert error == ""
+
+            # Verify the API call has deduplicated references
+            references_handler.client.send_post.assert_called_once_with(
+                "update_case/1",
+                {'refs': 'REQ-1,REQ-2'}  # Duplicates removed, order preserved
+            )
+
     def test_update_case_references_api_failure(self, references_handler):
         """Test API failure during update"""
         # Mock update_case response with failure
diff --git a/tests/test_junit_parse_reference.py b/tests/test_junit_parse_reference.py
new file mode 100644
index 00000000..1ab13dbf
--- /dev/null
+++ b/tests/test_junit_parse_reference.py
@@ -0,0 +1,273 @@
+"""
+Unit tests covering the new features - focused on critical missing areas.
+Tests for --test-run-ref, case updates, and reference management functionality.
+"""
+import pytest
+from unittest.mock import Mock, patch
+import json
+
+from trcli.commands.cmd_parse_junit import _validate_test_run_ref, _handle_case_update_reporting
+
+
+class TestCmdParseJunitValidation:
+    """Test coverage for cmd_parse_junit.py validation functions"""
+
+    def test_validate_test_run_ref_valid_input(self):
+        """Test _validate_test_run_ref with valid input"""
+        # Valid single reference
+        result = _validate_test_run_ref("REF-123")
+        assert result is None
+
+        # Valid multiple references
+        result = _validate_test_run_ref("REF-123,REF-456,REF-789")
+        assert result is None
+
+        # Valid with spaces
+        result = _validate_test_run_ref("REF-123, REF-456 , REF-789")
+        assert result is None
+
+    def test_validate_test_run_ref_invalid_input(self):
+        """Test _validate_test_run_ref with invalid input"""
+        # Empty string
+        result = _validate_test_run_ref("")
+        assert "cannot be empty" in result
+
+        # Whitespace only
+        result = _validate_test_run_ref(" ")
+        assert "cannot be empty" in result
+
+        # Only commas
+        result = _validate_test_run_ref(",,,")
+        assert "malformed input" in result
+
+        # Too long (over 250 chars)
+        long_refs = ",".join([f"REF-{i:03d}" for i in range(50)])  # Creates ~400 chars
+        result = _validate_test_run_ref(long_refs)
+        assert "250 character limit" in result
+
+    def test_handle_case_update_reporting_console(self):
+        """Test _handle_case_update_reporting console output"""
+        env = Mock()
+        env.json_output = False
+
+        case_update_results = {
+            "updated_cases": [
+                {"case_id": 123, "case_title": "Test Case 1", "added_refs": ["REF-1"], "skipped_refs": []}
+            ],
+            "skipped_cases": [
+                {"case_id": 456, "case_title": "Test Case 2", "reason": "All references already present", "skipped_refs": ["REF-2"]}
+            ],
+            "failed_cases": [
+                {"case_id": 789, "case_title": "Test Case 3", "error": "API error"}
+            ]
+        }
+
+        _handle_case_update_reporting(env, case_update_results)
+
+        # Verify console output was logged
+        assert env.log.called
+        call_args = [call[0][0] for call in env.log.call_args_list]
+        output = " ".join(call_args)
+        assert "Case Reference Updates Summary:" in output
+        assert "Updated cases: 1" in output
+        assert "Skipped cases: 1" in output
+        assert "Failed cases: 1" in output
+
+    @patch('builtins.print')
+    def test_handle_case_update_reporting_json(self, mock_print):
+        """Test _handle_case_update_reporting JSON output"""
+        env = Mock()
+        env.json_output = True
+
+        case_update_results = {
+            "updated_cases": [{"case_id": 123, "added_refs": ["REF-1"]}],
+            "skipped_cases": [],
+            "failed_cases": []
+        }
+
+        _handle_case_update_reporting(env, case_update_results)
+
+        # Verify JSON output
+        assert mock_print.called
+        json_output = mock_print.call_args[0][0]
+        parsed = json.loads(json_output)
+        assert "summary" in parsed
+        assert "details" in parsed
+ assert parsed["summary"]["updated_cases"] == 1 + + def test_handle_case_update_reporting_none_input(self): + """Test _handle_case_update_reporting with None input""" + env = Mock() + + # Should return early without error + result = _handle_case_update_reporting(env, None) + assert result is None + + +class TestReferenceDeduplication: + """Test coverage for reference deduplication logic""" + + def test_reference_deduplication_logic(self): + """Test the deduplication logic used in reference management""" + # Test input with duplicates + references = ["REF-1", "REF-1", "REF-2", "REF-2", "REF-1", "REF-3"] + + # Apply deduplication logic (same as in api_request_handler.py) + seen = set() + unique_refs = [] + for ref in references: + if ref not in seen: + seen.add(ref) + unique_refs.append(ref) + + # Should preserve order and remove duplicates + assert unique_refs == ["REF-1", "REF-2", "REF-3"] + assert len(unique_refs) == 3 + + def test_reference_string_parsing(self): + """Test parsing comma-separated reference strings""" + # Test various input formats + test_cases = [ + ("REF-1,REF-2,REF-3", ["REF-1", "REF-2", "REF-3"]), + ("REF-1, REF-2 , REF-3", ["REF-1", "REF-2", "REF-3"]), + ("REF-1,,REF-2", ["REF-1", "REF-2"]), + (" REF-1 , REF-2 ", ["REF-1", "REF-2"]), + ] + + for input_str, expected in test_cases: + # Apply parsing logic (same as in api_request_handler.py) + refs_list = [ref.strip() for ref in input_str.split(',') if ref.strip()] + assert refs_list == expected + + def test_character_limit_validation(self): + """Test character limit validation for references""" + # Test 250 character limit (for run references) + short_refs = ",".join([f"REF-{i:02d}" for i in range(30)]) # ~150 chars + assert len(short_refs) < 250 + + long_refs = ",".join([f"REF-{i:03d}" for i in range(50)]) # ~300 chars + assert len(long_refs) > 250 + + # Test 2000 character limit (for case references) + very_long_refs = ",".join([f"VERY-LONG-REFERENCE-NAME-{i:03d}" for i in range(100)]) + assert len(very_long_refs) > 2000 + + +class TestJunitReferenceExtraction: + """Test coverage for JUnit reference extraction logic""" + + def test_testrail_case_field_parsing(self): + """Test parsing testrail_case_field values""" + # Test various formats + test_cases = [ + ("refs:REF-1", "REF-1"), + ("refs:REF-1,REF-2", "REF-1,REF-2"), + ("refs:REF-1,REF-2,REF-3", "REF-1,REF-2,REF-3"), + ("refs: REF-1 , REF-2 ", " REF-1 , REF-2 "), # Spaces preserved + ] + + for testrail_field, expected_refs in test_cases: + # Apply parsing logic (same as in junit_xml.py) + if testrail_field.startswith("refs:"): + refs = testrail_field[5:] # Remove "refs:" prefix + assert refs == expected_refs + + def test_case_refs_validation(self): + """Test case reference validation""" + # Test empty/whitespace handling + test_cases = [ + ("", False), + (" ", False), + ("refs:", False), + ("refs: ", False), + ("refs:REF-1", True), + ("refs: REF-1 ", True), + ] + + for case_refs, should_be_valid in test_cases: + # Apply validation logic (same as in junit_xml.py) + if case_refs.startswith("refs:"): + refs_content = case_refs[5:] + is_valid = bool(refs_content and refs_content.strip()) + assert is_valid == should_be_valid + + +class TestCaseUpdateWorkflow: + """Test coverage for case update workflow logic""" + + def test_case_categorization_logic(self): + """Test logic for categorizing cases during updates""" + # Mock test cases + existing_case = {"case_id": 123, "has_junit_refs": True} + newly_created_case = {"case_id": 456, "has_junit_refs": True} + 
+        case_without_refs = {"case_id": 789, "has_junit_refs": False}
+
+        # Mock newly created case IDs
+        newly_created_case_ids = {456}
+
+        # Test categorization logic
+        cases_to_update = []
+        cases_to_skip = []
+
+        for case in [existing_case, newly_created_case, case_without_refs]:
+            case_id = case["case_id"]
+            has_refs = case["has_junit_refs"]
+
+            if case_id in newly_created_case_ids:
+                cases_to_skip.append({"case_id": case_id, "reason": "Newly created case"})
+            elif not has_refs:
+                cases_to_skip.append({"case_id": case_id, "reason": "No JUnit refs"})
+            else:
+                cases_to_update.append(case)
+
+        # Verify categorization
+        assert len(cases_to_update) == 1
+        assert cases_to_update[0]["case_id"] == 123
+
+        assert len(cases_to_skip) == 2
+        assert any(c["case_id"] == 456 and "Newly created" in c["reason"] for c in cases_to_skip)
+        assert any(c["case_id"] == 789 and "No JUnit refs" in c["reason"] for c in cases_to_skip)
+
+    def test_update_result_categorization(self):
+        """Test categorization of update results"""
+        # Mock API responses
+        api_responses = [
+            (True, "Success", ["REF-1"], []),  # Successful update
+            (True, "Success", [], ["REF-2"]),  # All refs already present
+            (False, "API Error", [], []),      # Failed update
+        ]
+
+        updated_cases = []
+        skipped_cases = []
+        failed_cases = []
+
+        for i, (success, message, added_refs, skipped_refs) in enumerate(api_responses):
+            case_id = 100 + i
+
+            if not success:
+                failed_cases.append({"case_id": case_id, "error": message})
+            elif not added_refs:  # No refs were added (all were duplicates)
+                skipped_cases.append({
+                    "case_id": case_id,
+                    "reason": "All references already present",
+                    "skipped_refs": skipped_refs
+                })
+            else:
+                updated_cases.append({
+                    "case_id": case_id,
+                    "added_refs": added_refs,
+                    "skipped_refs": skipped_refs
+                })
+
+        # Verify categorization
+        assert len(updated_cases) == 1
+        assert updated_cases[0]["case_id"] == 100
+        assert updated_cases[0]["added_refs"] == ["REF-1"]
+
+        assert len(skipped_cases) == 1
+        assert skipped_cases[0]["case_id"] == 101
+        assert "All references already present" in skipped_cases[0]["reason"]
+
+        assert len(failed_cases) == 1
+        assert failed_cases[0]["case_id"] == 102
+        assert failed_cases[0]["error"] == "API Error"
diff --git a/tests/test_junit_parser.py b/tests/test_junit_parser.py
index 6a187890..7c2e107c 100644
--- a/tests/test_junit_parser.py
+++ b/tests/test_junit_parser.py
@@ -169,9 +169,12 @@ def test_junit_xml_parser_validation_error(self):
     def __clear_unparsable_junit_elements(
         self, test_rail_suite: TestRailSuite
     ) -> TestRailSuite:
-        """helper method to delete junit_result_unparsed field,
+        """helper method to delete junit_result_unparsed field and temporary junit_case_refs attribute,
         which asdict() method of dataclass can't handle"""
         for section in test_rail_suite.testsections:
             for case in section.testcases:
                 case.result.junit_result_unparsed = []
+                # Remove temporary junit_case_refs attribute if it exists
+                if hasattr(case, '_junit_case_refs'):
+                    delattr(case, '_junit_case_refs')
         return test_rail_suite
diff --git a/tests/test_robot_parser.py b/tests/test_robot_parser.py
index 10f49556..35a8594e 100644
--- a/tests/test_robot_parser.py
+++ b/tests/test_robot_parser.py
@@ -53,13 +53,25 @@ def test_robot_xml_parser_id_matcher_name(
         env.case_matcher = matcher
         env.file = input_xml_path
         file_reader = RobotParser(env)
-        read_junit = file_reader.parse_file()[0]
+        read_junit = self.__clear_unparsable_junit_elements(file_reader.parse_file()[0])
         parsing_result_json = asdict(read_junit)
         file_json = open(expected_path)
         expected_json = json.load(file_json)
         assert DeepDiff(parsing_result_json, expected_json) == {}, \
             f"Result of parsing XML is different than expected \n{DeepDiff(parsing_result_json, expected_json)}"
 
+    def __clear_unparsable_junit_elements(
+        self, test_rail_suite: TestRailSuite
+    ) -> TestRailSuite:
+        """helper method to delete temporary junit_case_refs attribute,
+        which asdict() method of dataclass can't handle"""
+        for section in test_rail_suite.testsections:
+            for case in section.testcases:
+                # Remove temporary junit_case_refs attribute if it exists
+                if hasattr(case, '_junit_case_refs'):
+                    delattr(case, '_junit_case_refs')
+        return test_rail_suite
+
     @pytest.mark.parse_robot
     def test_robot_xml_parser_file_not_found(self):
         with pytest.raises(FileNotFoundError):
diff --git a/trcli/__init__.py b/trcli/__init__.py
index 438a38d1..858085de 100644
--- a/trcli/__init__.py
+++ b/trcli/__init__.py
@@ -1 +1 @@
-__version__ = "1.12.1"
+__version__ = "1.12.2"
diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py
index da6f0bc1..687f5ea9 100644
--- a/trcli/api/api_request_handler.py
+++ b/trcli/api/api_request_handler.py
@@ -529,6 +529,149 @@ def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str:
         combined_list = existing_list + [ref for ref in new_list if ref not in existing_list]
         return ','.join(combined_list)
 
+    def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]:
+        """
+        Append references to a test run, avoiding duplicates.
+        :param run_id: ID of the test run
+        :param references: List of references to append
+        :returns: Tuple with (run_data, added_refs, skipped_refs, error_message)
+        """
+        # Get current run data
+        run_response = self.client.send_get(f"get_run/{run_id}")
+        if run_response.error_message:
+            return None, [], [], run_response.error_message
+
+        existing_refs = run_response.response_text.get("refs", "") or ""
+
+        # Parse existing and new references
+        existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else []
+        # Deduplicate input references
+        new_list = []
+        seen = set()
+        for ref in references:
+            ref_clean = ref.strip()
+            if ref_clean and ref_clean not in seen:
+                new_list.append(ref_clean)
+                seen.add(ref_clean)
+
+        # Determine which references are new vs duplicates
+        added_refs = [ref for ref in new_list if ref not in existing_list]
+        skipped_refs = [ref for ref in new_list if ref in existing_list]
+
+        # If no new references to add, return current state
+        if not added_refs:
+            return run_response.response_text, added_refs, skipped_refs, None
+
+        # Combine references
+        combined_list = existing_list + added_refs
+        combined_refs = ','.join(combined_list)
+
+        if len(combined_refs) > 250:
+            return None, [], [], f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit"
+
+        update_data = {"refs": combined_refs}
+
+        # Determine the correct API endpoint based on plan membership
+        plan_id = run_response.response_text.get("plan_id")
+        config_ids = run_response.response_text.get("config_ids")
+
+        if not plan_id:
+            # Standalone run
+            update_response = self.client.send_post(f"update_run/{run_id}", update_data)
+        elif plan_id and config_ids:
+            # Run in plan with configurations
+            update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", update_data)
+        else:
+            # Run in plan without configurations - need to use plan entry endpoint
+            plan_response = self.client.send_get(f"get_plan/{plan_id}")
self.client.send_get(f"get_plan/{plan_id}") + if plan_response.error_message: + return None, [], [], f"Failed to get plan details: {plan_response.error_message}" + + # Find the entry_id for this run + entry_id = None + for entry in plan_response.response_text.get("entries", []): + for run in entry.get("runs", []): + if run["id"] == run_id: + entry_id = entry["id"] + break + if entry_id: + break + + if not entry_id: + return None, [], [], f"Could not find plan entry for run {run_id}" + + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) + + if update_response.error_message: + return None, [], [], update_response.error_message + + updated_run_response = self.client.send_get(f"get_run/{run_id}") + return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message + + def update_existing_case_references(self, case_id: int, junit_refs: str, strategy: str = "append") -> Tuple[bool, str, List[str], List[str]]: + """ + Update existing case references with values from JUnit properties. + :param case_id: ID of the test case + :param junit_refs: References from JUnit testrail_case_field property + :param strategy: 'append' or 'replace' + :returns: Tuple with (success, error_message, added_refs, skipped_refs) + """ + if not junit_refs or not junit_refs.strip(): + return True, None, [], [] # No references to process + + # Parse and validate JUnit references, deduplicating input + junit_ref_list = [] + seen = set() + for ref in junit_refs.split(','): + ref_clean = ref.strip() + if ref_clean and ref_clean not in seen: + junit_ref_list.append(ref_clean) + seen.add(ref_clean) + + if not junit_ref_list: + return False, "No valid references found in JUnit property", [], [] + + # Get current case data + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.error_message: + return False, case_response.error_message, [], [] + + existing_refs = case_response.response_text.get('refs', '') or '' + + if strategy == "replace": + # Replace strategy: use JUnit refs as-is + new_refs = ','.join(junit_ref_list) + added_refs = junit_ref_list + skipped_refs = [] + else: + # Append strategy: combine with existing refs, avoiding duplicates + existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] + + # Determine which references are new vs duplicates + added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] + skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] + + # If no new references to add, return current state + if not added_refs: + return True, None, added_refs, skipped_refs + + # Combine references + combined_list = existing_ref_list + added_refs + new_refs = ','.join(combined_list) + + # Validate 2000 character limit for test case references + if len(new_refs) > 2000: + return False, f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", [], [] + + # Update the case + update_data = {"refs": new_refs} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.error_message: + return False, update_response.error_message, [], [] + + return True, None, added_refs, skipped_refs + def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): """ Getting test result id and upload attachments for it. 
""" tests_in_run, error = self.__get_all_tests_in_run(run_id) @@ -1474,9 +1617,18 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool if existing_refs: existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - # Add new references (avoid duplicates) - all_refs = existing_ref_list.copy() + # Deduplicate input references while preserving order + deduplicated_input = [] + seen = set() for ref in references: + ref_clean = ref.strip() + if ref_clean and ref_clean not in seen: + deduplicated_input.append(ref_clean) + seen.add(ref_clean) + + # Add new references (avoid duplicates with existing) + all_refs = existing_ref_list.copy() + for ref in deduplicated_input: if ref not in all_refs: all_refs.append(ref) @@ -1503,8 +1655,17 @@ def update_case_references(self, case_id: int, references: List[str]) -> Tuple[b :param references: List of references to replace existing ones :returns: Tuple with success status and error string """ + # Deduplicate input references while preserving order + deduplicated_refs = [] + seen = set() + for ref in references: + ref_clean = ref.strip() + if ref_clean and ref_clean not in seen: + deduplicated_refs.append(ref_clean) + seen.add(ref_clean) + # Join references - new_refs_string = ','.join(references) + new_refs_string = ','.join(deduplicated_refs) # Validate total character limit if len(new_refs_string) > 2000: @@ -1545,8 +1706,11 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = # Parse existing references existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] + # Deduplicate input references for efficient processing + refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) + # Remove specific references - remaining_refs = [ref for ref in existing_ref_list if ref not in specific_references] + remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] # Join remaining references new_refs_string = ','.join(remaining_refs) diff --git a/trcli/api/results_uploader.py b/trcli/api/results_uploader.py index c471c0ce..de99f3ac 100644 --- a/trcli/api/results_uploader.py +++ b/trcli/api/results_uploader.py @@ -1,5 +1,5 @@ import time -from beartype.typing import Tuple, Callable, List +from beartype.typing import Tuple, Callable, List, Dict from trcli.api.project_based_client import ProjectBasedClient from trcli.cli import Environment @@ -17,6 +17,7 @@ class ResultsUploader(ProjectBasedClient): def __init__(self, environment: Environment, suite: TestRailSuite, skip_run: bool = False): super().__init__(environment, suite) self.skip_run = skip_run + self.last_run_id = None if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl": self.run_name += f" ({suite.name})" @@ -102,8 +103,23 @@ def upload_results(self): else: self.environment.log(f"Removed {len(empty_sections)} unused/empty section(s).") + # Update existing cases with JUnit references if enabled + case_update_results = None + case_update_failed = [] + if hasattr(self.environment, 'update_existing_cases') and self.environment.update_existing_cases == "yes": + self.environment.log("Updating existing cases with JUnit references...") + case_update_results, case_update_failed = self.update_existing_cases_with_junit_refs(added_test_cases) + + if case_update_results.get("updated_cases"): + self.environment.log(f"Updated {len(case_update_results['updated_cases'])} existing case(s) with references.") + if 
diff --git a/trcli/api/results_uploader.py b/trcli/api/results_uploader.py
index c471c0ce..de99f3ac 100644
--- a/trcli/api/results_uploader.py
+++ b/trcli/api/results_uploader.py
@@ -1,5 +1,5 @@
 import time
-from beartype.typing import Tuple, Callable, List
+from beartype.typing import Tuple, Callable, List, Dict
 
 from trcli.api.project_based_client import ProjectBasedClient
 from trcli.cli import Environment
@@ -17,6 +17,7 @@ class ResultsUploader(ProjectBasedClient):
     def __init__(self, environment: Environment, suite: TestRailSuite, skip_run: bool = False):
         super().__init__(environment, suite)
         self.skip_run = skip_run
+        self.last_run_id = None
         if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl":
             self.run_name += f" ({suite.name})"
@@ -102,8 +103,23 @@ def upload_results(self):
         else:
             self.environment.log(f"Removed {len(empty_sections)} unused/empty section(s).")
 
+        # Update existing cases with JUnit references if enabled
+        case_update_results = None
+        case_update_failed = []
+        if hasattr(self.environment, 'update_existing_cases') and self.environment.update_existing_cases == "yes":
+            self.environment.log("Updating existing cases with JUnit references...")
+            case_update_results, case_update_failed = self.update_existing_cases_with_junit_refs(added_test_cases)
+
+            if case_update_results.get("updated_cases"):
+                self.environment.log(f"Updated {len(case_update_results['updated_cases'])} existing case(s) with references.")
+            if case_update_results.get("failed_cases"):
+                self.environment.elog(f"Failed to update {len(case_update_results['failed_cases'])} case(s).")
+
         # Create/update test run
         run_id, error_message = self.create_or_update_test_run()
+        self.last_run_id = run_id
+        # Store case update results for later reporting
+        self.case_update_results = case_update_results
         if error_message:
             revert_logs = self.rollback_changes(
                 suite_id=suite_id,
@@ -147,6 +163,8 @@ def upload_results(self):
         except (AttributeError, TypeError):
             # Skip exit if there are any issues with the attribute
             pass
+
+        # Note: Error exit for case update failures is handled in cmd_parse_junit.py after reporting
 
     def _validate_and_store_user_ids(self):
         """
@@ -202,6 +220,94 @@ def _validate_and_store_user_ids(self):
         # Store valid user IDs for later use
         self.environment._validated_user_ids = valid_user_ids
 
+    def update_existing_cases_with_junit_refs(self, added_test_cases: List[Dict] = None) -> Tuple[Dict, List]:
+        """
+        Update existing test cases with references from JUnit properties.
+        Excludes newly created cases to avoid unnecessary API calls.
+
+        :param added_test_cases: List of cases that were just created (to be excluded)
+        :returns: Tuple of (update_results, failed_cases)
+        """
+        if not hasattr(self.environment, 'update_existing_cases') or self.environment.update_existing_cases != "yes":
+            return {}, []  # Feature not enabled
+
+        # Create a set of newly created case IDs to exclude
+        newly_created_case_ids = set()
+        if added_test_cases:
+            # Ensure all case IDs are integers for consistent comparison
+            newly_created_case_ids = {int(case.get('case_id')) for case in added_test_cases if case.get('case_id')}
+
+        update_results = {
+            "updated_cases": [],
+            "skipped_cases": [],
+            "failed_cases": []
+        }
+        failed_cases = []
+
+        strategy = getattr(self.environment, 'update_strategy', 'append')
+
+        # Process all test cases in all sections
+        for section in self.api_request_handler.suites_data_from_provider.testsections:
+            for test_case in section.testcases:
+                # Only process cases that have a case_id (existing cases) and JUnit refs
+                # AND exclude newly created cases
+                if (test_case.case_id and
+                        hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and
+                        int(test_case.case_id) not in newly_created_case_ids):
+                    try:
+                        success, error_msg, added_refs, skipped_refs = self.api_request_handler.update_existing_case_references(
+                            test_case.case_id, test_case._junit_case_refs, strategy
+                        )
+
+                        if success:
+                            if added_refs:
+                                # Only count as "updated" if references were actually added
+                                update_results["updated_cases"].append({
+                                    "case_id": test_case.case_id,
+                                    "case_title": test_case.title,
+                                    "added_refs": added_refs,
+                                    "skipped_refs": skipped_refs
+                                })
+                            else:
+                                # If no refs were added (all were duplicates or no valid refs), count as skipped
+                                reason = "All references already present" if skipped_refs else "No valid references to process"
+                                update_results["skipped_cases"].append({
+                                    "case_id": test_case.case_id,
+                                    "case_title": test_case.title,
+                                    "reason": reason,
+                                    "skipped_refs": skipped_refs
+                                })
+                        else:
+                            error_info = {
+                                "case_id": test_case.case_id,
+                                "case_title": test_case.title,
+                                "error": error_msg
+                            }
+                            update_results["failed_cases"].append(error_info)
+                            failed_cases.append(error_info)
+                            self.environment.elog(f"Failed to update case C{test_case.case_id}: {error_msg}")
+
+                    except Exception as e:
+                        error_info = {
+                            "case_id": test_case.case_id,
+                            "case_title": test_case.title,
+                            "error": str(e)
+                        }
update_results["failed_cases"].append(error_info) + failed_cases.append(error_info) + self.environment.elog(f"Exception updating case C{test_case.case_id}: {str(e)}") + + elif (test_case.case_id and + hasattr(test_case, '_junit_case_refs') and test_case._junit_case_refs and + int(test_case.case_id) in newly_created_case_ids): + # Skip newly created cases - they already have their references set + update_results["skipped_cases"].append({ + "case_id": test_case.case_id, + "case_title": test_case.title, + "reason": "Newly created case - references already set during creation" + }) + + return update_results, failed_cases def add_missing_sections(self, project_id: int) -> Tuple[List, int]: """ diff --git a/trcli/cli.py b/trcli/cli.py index 07233a4f..155e1e69 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -69,6 +69,10 @@ def __init__(self, cmd="parse_junit"): self.run_include_all = None self.auto_close_run = None self.run_refs = None + self.test_run_ref = None + self.json_output = None + self.update_existing_cases = None + self.update_strategy = None self.proxy = None self.assign_failed_to = None # Add proxy related attributes self.noproxy = None diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index 218fb6e7..e24fcea6 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -27,6 +27,30 @@ metavar="", help="Comma-separated list of user emails to assign failed test results to." ) +@click.option( + "--test-run-ref", + metavar="", + help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total)." +) +@click.option( + "--json-output", + is_flag=True, + help="Output reference operation results in JSON format." +) +@click.option( + "--update-existing-cases", + type=click.Choice(["yes", "no"], case_sensitive=False), + default="no", + metavar="", + help="Update existing TestRail cases with values from JUnit properties (default: no)." +) +@click.option( + "--update-strategy", + type=click.Choice(["append", "replace"], case_sensitive=False), + default="append", + metavar="", + help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append)." 
+)
 @click.pass_context
 @pass_environment
 def cli(environment: Environment, context: click.Context, *args, **kwargs):
@@ -34,13 +58,40 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs):
     environment.cmd = "parse_junit"
     environment.set_parameters(context)
     environment.check_for_required_parameters()
+
+    if environment.test_run_ref is not None:
+        validation_error = _validate_test_run_ref(environment.test_run_ref)
+        if validation_error:
+            environment.elog(validation_error)
+            exit(1)
+
     settings.ALLOW_ELAPSED_MS = environment.allow_ms
     print_config(environment)
     try:
         parsed_suites = JunitParser(environment).parse_file()
+        run_id = None
+        case_update_results = {}
         for suite in parsed_suites:
            result_uploader = ResultsUploader(environment=environment, suite=suite)
            result_uploader.upload_results()
+
+            if run_id is None and hasattr(result_uploader, 'last_run_id'):
+                run_id = result_uploader.last_run_id
+
+            # Collect case update results
+            if hasattr(result_uploader, 'case_update_results'):
+                case_update_results = result_uploader.case_update_results
+
+        if environment.test_run_ref and run_id:
+            _handle_test_run_references(environment, run_id)
+
+        # Handle case update reporting if enabled
+        if environment.update_existing_cases == "yes" and case_update_results is not None:
+            _handle_case_update_reporting(environment, case_update_results)
+
+        # Exit with error if there were case update failures (after reporting)
+        if case_update_results.get("failed_cases"):
+            exit(1)
     except FileNotFoundError:
         environment.elog(FAULT_MAPPING["missing_file"])
         exit(1)
@@ -56,3 +107,127 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs):
             )
         )
         exit(1)
+
+
+def _validate_test_run_ref(test_run_ref: str) -> str:
+    """
+    Validate the test-run-ref input.
+    Returns error message if invalid, None if valid.
+    """
+    if not test_run_ref or not test_run_ref.strip():
+        return "Error: --test-run-ref cannot be empty or whitespace-only"
+
+    refs = [ref.strip() for ref in test_run_ref.split(',') if ref.strip()]
+    if not refs:
+        return "Error: --test-run-ref contains no valid references (malformed input)"
+
+    if len(test_run_ref) > 250:
+        return f"Error: --test-run-ref exceeds 250 character limit ({len(test_run_ref)} characters)"
+
+    return None
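Grounded in the messages above and the unit tests earlier in this patch, `_validate_test_run_ref` returns `None` for valid input and an error string otherwise; for example:

```python
from trcli.commands.cmd_parse_junit import _validate_test_run_ref

assert _validate_test_run_ref("REF-123, REF-456") is None
assert "cannot be empty" in _validate_test_run_ref("   ")
assert "malformed input" in _validate_test_run_ref(",,,")
assert "250 character limit" in _validate_test_run_ref("X" * 251)
```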
+ """ + from trcli.api.project_based_client import ProjectBasedClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + import json + + refs = [ref.strip() for ref in environment.test_run_ref.split(',') if ref.strip()] + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name="temp", suite_id=1) + ) + project_client.resolve_project() + + environment.log(f"Appending references to test run {run_id}...") + run_data, added_refs, skipped_refs, error_message = project_client.api_request_handler.append_run_references( + run_id, refs + ) + + if error_message: + environment.elog(f"Error: Failed to append references: {error_message}") + exit(1) + + final_refs = run_data.get("refs", "") if run_data else "" + + if environment.json_output: + # JSON output + result = { + "run_id": run_id, + "added": added_refs, + "skipped": skipped_refs, + "total_references": final_refs + } + print(json.dumps(result, indent=2)) + else: + environment.log(f"References appended successfully:") + environment.log(f" Run ID: {run_id}") + environment.log(f" Total references: {len(final_refs.split(',')) if final_refs else 0}") + environment.log(f" Newly added: {len(added_refs)} ({', '.join(added_refs) if added_refs else 'none'})") + environment.log(f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})") + if final_refs: + environment.log(f" All references: {final_refs}") + + +def _handle_case_update_reporting(environment: Environment, case_update_results: dict): + """ + Handle reporting of case update results. + """ + import json + + # Handle None input gracefully + if case_update_results is None: + return + + if environment.json_output: + # JSON output for case updates + result = { + "summary": { + "updated_cases": len(case_update_results.get("updated_cases", [])), + "skipped_cases": len(case_update_results.get("skipped_cases", [])), + "failed_cases": len(case_update_results.get("failed_cases", [])) + }, + "details": { + "updated_cases": case_update_results.get("updated_cases", []), + "skipped_cases": case_update_results.get("skipped_cases", []), + "failed_cases": case_update_results.get("failed_cases", []) + } + } + print(json.dumps(result, indent=2)) + else: + # Console output for case updates + updated_cases = case_update_results.get("updated_cases", []) + skipped_cases = case_update_results.get("skipped_cases", []) + failed_cases = case_update_results.get("failed_cases", []) + + if updated_cases or skipped_cases or failed_cases: + environment.log("Case Reference Updates Summary:") + environment.log(f" Updated cases: {len(updated_cases)}") + environment.log(f" Skipped cases: {len(skipped_cases)}") + environment.log(f" Failed cases: {len(failed_cases)}") + + if updated_cases: + environment.log(" Updated case details:") + for case_info in updated_cases: + case_id = case_info["case_id"] + added = case_info.get("added_refs", []) + skipped = case_info.get("skipped_refs", []) + environment.log(f" C{case_id}: added {len(added)} refs, skipped {len(skipped)} duplicates") + + if skipped_cases: + environment.log(" Skipped case details:") + for case_info in skipped_cases: + case_id = case_info["case_id"] + reason = case_info.get("reason", "Unknown reason") + environment.log(f" C{case_id}: {reason}") + + if failed_cases: + environment.log(" Failed case details:") + for case_info in failed_cases: + case_id = case_info["case_id"] + error = case_info.get("error", "Unknown error") + environment.log(f" C{case_id}: {error}") diff --git 
diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py
index 36bc64fe..2218c319 100644
--- a/trcli/readers/junit_xml.py
+++ b/trcli/readers/junit_xml.py
@@ -145,6 +145,7 @@ def _parse_case_properties(case):
         result_fields = []
         comments = []
         case_fields = []
+        case_refs = None
         sauce_session = None
 
         for case_props in case.iterchildren(Properties):
@@ -166,11 +167,16 @@ def _parse_case_properties(case):
                     comments.append(value)
                 elif name.startswith("testrail_case_field"):
                     text = prop._elem.text.strip() if prop._elem.text else None
-                    case_fields.append(text or value)
+                    field_value = text or value
+                    case_fields.append(field_value)
+
+                    # Extract refs for case updates
+                    if field_value and field_value.startswith("refs:"):
+                        case_refs = field_value[5:].strip()  # Remove "refs:" prefix
                 elif name.startswith("testrail_sauce_session"):
                     sauce_session = value
 
-        return result_steps, attachments, result_fields, comments, case_fields, sauce_session
+        return result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session
 
     def _resolve_case_fields(self, result_fields, case_fields):
         result_fields_dict, error = FieldsParser.resolve_fields(result_fields)
@@ -195,7 +201,7 @@ def _parse_test_cases(self, section) -> List[TestRailCase]:
         """
             automation_id = f"{case.classname}.{case.name}"
             case_id, case_name = self._extract_case_id_and_name(case)
-            result_steps, attachments, result_fields, comments, case_fields, sauce_session = self._parse_case_properties(
+            result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session = self._parse_case_properties(
                 case)
             result_fields_dict, case_fields_dict = self._resolve_case_fields(result_fields, case_fields)
            status_id = self._get_status_id_for_case_result(case)
@@ -219,14 +225,27 @@ def _parse_test_cases(self, section) -> List[TestRailCase]:
                 case_fields_dict.pop(OLD_SYSTEM_NAME_AUTOMATION_ID, None)
                 or case._elem.get(OLD_SYSTEM_NAME_AUTOMATION_ID, automation_id))
 
-            test_cases.append(TestRailCase(
-                title=TestRailCaseFieldsOptimizer.extract_last_words(case_name,
-                    TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH),
-                case_id=case_id,
-                result=result,
-                custom_automation_id=automation_id,
-                case_fields=case_fields_dict
-            ))
+            # Create TestRailCase kwargs
+            case_kwargs = {
+                "title": TestRailCaseFieldsOptimizer.extract_last_words(case_name,
+                    TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH),
+                "case_id": case_id,
+                "result": result,
+                "custom_automation_id": automation_id,
+                "case_fields": case_fields_dict,
+            }
+
+            # Only set refs field if case_refs has actual content
+            if case_refs and case_refs.strip():
+                case_kwargs["refs"] = case_refs
+
+            test_case = TestRailCase(**case_kwargs)
+
+            # Store JUnit references as a temporary attribute for case updates (not serialized)
+            if case_refs and case_refs.strip():
+                test_case._junit_case_refs = case_refs
+
+            test_cases.append(test_case)
 
         return test_cases
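For reference, the `refs:` values picked up here come from `testrail_case_field` properties in the JUnit report. A small illustration of the convention and the extraction rule applied in `_parse_case_properties` (the XML snippet is invented for illustration, not taken from the source tree):

```python
# Illustrative JUnit property carrying a TestRail refs case field.
JUNIT_SNIPPET = """\
<testcase classname="tests.LoginTests" name="test_login">
  <properties>
    <property name="testrail_case_field" value="refs:JIRA-101,JIRA-102"/>
  </properties>
</testcase>
"""

# The extraction rule from the diff above:
field_value = "refs:JIRA-101,JIRA-102"
if field_value and field_value.startswith("refs:"):
    case_refs = field_value[5:].strip()  # strips the "refs:" prefix
    assert case_refs == "JIRA-101,JIRA-102"
```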