From e0448fbed0bfcd12065164ab4d41acc429be7306 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 5 Nov 2025 17:51:33 +0800 Subject: [PATCH 01/10] Updated files for 1.12.5 release --- CHANGELOG.MD | 7 +++++++ README.md | 8 ++++---- trcli/__init__.py | 2 +- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 485784d..f408950 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -6,6 +6,13 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb - **MINOR**: New features that are backward-compatible. - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility. +## [1.12.5] + +_released 11-03-2025 + +### Fixed + - Fixed an issue where adding labels to project fails using label add command + ## [1.12.4] _released 11-03-2025 diff --git a/README.md b/README.md index 863573e..8f4dee5 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ trcli ``` You should get something like this: ``` -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) @@ -47,7 +47,7 @@ CLI general reference -------- ```shell $ trcli --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli [OPTIONS] COMMAND [ARGS]... @@ -1094,7 +1094,7 @@ Options: ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -1218,7 +1218,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli parse_openapi [OPTIONS] diff --git a/trcli/__init__.py b/trcli/__init__.py index 19ee973..dd8aa62 100644 --- a/trcli/__init__.py +++ b/trcli/__init__.py @@ -1 +1 @@ -__version__ = "1.12.4" +__version__ = "1.12.5" From aacebdea8f352b964e432bdb3a671303ddb20567 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 5 Nov 2025 18:01:44 +0800 Subject: [PATCH 02/10] TRCLI-205 Updated payload format for adding labels, also updated unit and functional tests --- tests/test_api_request_handler_labels.py | 981 +++++++---------- tests_e2e/test_end2end.py | 1264 +++++++++++----------- trcli/api/api_request_handler.py | 887 ++++++++------- 3 files changed, 1456 insertions(+), 1676 deletions(-) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index c447613..702f46d 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -21,13 +21,12 @@ def labels_handler(): environment.batch_size = 10 environment.case_matcher = MatchersParser.AUTO - # Load test data json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" with open(json_path) as file_json: json_string = json.dumps(json.load(file_json)) test_input = from_json(TestRailSuite, json_string) - + api_request = ApiRequestHandler(environment, api_client, test_input, verify=False) return api_request @@ -39,93 +38,68 @@ def test_add_label_success(self, labels_handler): """Test successful label addition""" # Mock the API client response mock_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test Label"}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test Label"}, error_message=None 
) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): result, error = labels_handler.add_label(project_id=1, title="Test Label") - + assert error is None assert result["id"] == 1 assert result["title"] == "Test Label" - + # Verify the API call was made with correct parameters - labels_handler.client.send_post.assert_called_once_with( - "add_label/1", - payload=None, - files={'title': (None, "Test Label")} - ) + labels_handler.client.send_post.assert_called_once_with("add_label/1", payload={"title": "Test Label"}) def test_add_label_api_error(self, labels_handler): """Test label addition with API error""" - mock_response = APIClientResult( - status_code=400, - response_text=None, - error_message="Label title already exists" - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=400, response_text=None, error_message="Label title already exists") + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): result, error = labels_handler.add_label(project_id=1, title="Duplicate Label") - + assert error == "Label title already exists" assert result is None - def test_add_label_multipart_format(self, labels_handler): - """Test label addition uses multipart/form-data format""" + def test_add_label_json_format(self, labels_handler): + """Test label addition uses JSON format""" mock_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test Label"}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): result, error = labels_handler.add_label(project_id=1, title="Test Label") - + assert error is None - # Verify multipart/form-data format is used + # Verify JSON format is used call_args = labels_handler.client.send_post.call_args - assert call_args[1]['payload'] is None - assert call_args[1]['files'] == {'title': (None, "Test Label")} + assert call_args[1]["payload"] == {"title": "Test Label"} def test_update_label_success(self, labels_handler): """Test successful label update""" mock_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Updated Label"}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Updated Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): - result, error = labels_handler.update_label( - label_id=1, project_id=1, title="Updated Label" - ) - + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): + result, error = labels_handler.update_label(label_id=1, project_id=1, title="Updated Label") + assert error is None assert result["id"] == 1 assert result["title"] == "Updated Label" - + # Verify the API call was made with correct parameters labels_handler.client.send_post.assert_called_once_with( - "update_label/1", - payload=None, - files={'project_id': (None, '1'), 'title': (None, "Updated Label")} + "update_label/1", payload={"project_id": 1, "title": "Updated Label"} ) def test_update_label_api_error(self, labels_handler): """Test label update with API error""" - mock_response = APIClientResult( - status_code=403, - response_text=None, - 
error_message="No access to the project" - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): - result, error = labels_handler.update_label( - label_id=1, project_id=1, title="Updated Label" - ) - + mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project") + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): + result, error = labels_handler.update_label(label_id=1, project_id=1, title="Updated Label") + assert error == "No access to the project" assert result is None @@ -133,37 +107,28 @@ def test_get_label_success(self, labels_handler): """Test successful single label retrieval""" mock_response = APIClientResult( status_code=200, - response_text={ - "id": 1, - "title": "Test Label", - "created_by": "2", - "created_on": "1234567890" - }, - error_message=None + response_text={"id": 1, "title": "Test Label", "created_by": "2", "created_on": "1234567890"}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_label(label_id=1) - + assert error is None assert result["id"] == 1 assert result["title"] == "Test Label" assert result["created_by"] == "2" - + # Verify the API call was made with correct parameters labels_handler.client.send_get.assert_called_once_with("get_label/1") def test_get_label_not_found(self, labels_handler): """Test single label retrieval when label not found""" - mock_response = APIClientResult( - status_code=400, - response_text=None, - error_message="Label not found" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + mock_response = APIClientResult(status_code=400, response_text=None, error_message="Label not found") + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_label(label_id=999) - + assert error == "Label not found" assert result is None @@ -178,21 +143,21 @@ def test_get_labels_success(self, labels_handler): "_links": {"next": None, "prev": None}, "labels": [ {"id": 1, "title": "Label 1", "created_by": "2", "created_on": "1234567890"}, - {"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"} - ] + {"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"}, + ], }, - error_message=None + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1) - + assert error is None assert result["size"] == 2 assert len(result["labels"]) == 2 assert result["labels"][0]["id"] == 1 assert result["labels"][1]["id"] == 2 - + # Verify the API call was made with correct parameters labels_handler.client.send_get.assert_called_once_with("get_labels/1") @@ -200,24 +165,18 @@ def test_get_labels_with_pagination(self, labels_handler): """Test labels listing with custom pagination parameters""" mock_response = APIClientResult( status_code=200, - response_text={ - "offset": 10, - "limit": 5, - "size": 0, - "_links": {"next": None, "prev": None}, - "labels": [] - }, - error_message=None + response_text={"offset": 10, "limit": 5, "size": 0, "_links": {"next": None, "prev": None}, "labels": []}, + error_message=None, ) - - with 
patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1, offset=10, limit=5) - + assert error is None assert result["offset"] == 10 assert result["limit"] == 5 assert len(result["labels"]) == 0 - + # Verify the API call was made with pagination parameters labels_handler.client.send_get.assert_called_once_with("get_labels/1&offset=10&limit=5") @@ -225,127 +184,90 @@ def test_get_labels_with_default_pagination(self, labels_handler): """Test labels listing with default pagination (should not add parameters)""" mock_response = APIClientResult( status_code=200, - response_text={ - "offset": 0, - "limit": 250, - "size": 1, - "labels": [{"id": 1, "title": "Label 1"}] - }, - error_message=None + response_text={"offset": 0, "limit": 250, "size": 1, "labels": [{"id": 1, "title": "Label 1"}]}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1, offset=0, limit=250) - + assert error is None # Should call without pagination parameters since they're defaults labels_handler.client.send_get.assert_called_once_with("get_labels/1") def test_get_labels_api_error(self, labels_handler): """Test labels listing with API error""" - mock_response = APIClientResult( - status_code=403, - response_text=None, - error_message="No access to the project" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project") + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1) - + assert error == "No access to the project" assert result is None def test_delete_labels_success(self, labels_handler): """Test successful label deletion""" - mock_response = APIClientResult( - status_code=200, - response_text="Success", - error_message=None - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None) + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[1, 2, 3]) - + assert success is True assert error is None - + # Verify the API call was made with correct parameters - labels_handler.client.send_post.assert_called_once_with( - "delete_labels", - payload=None, - files={"label_ids": (None, "[1, 2, 3]")} - ) + labels_handler.client.send_post.assert_called_once_with("delete_labels", payload={"label_ids": [1, 2, 3]}) def test_delete_label_single_id(self, labels_handler): """Test single label deletion""" - mock_response = APIClientResult( - status_code=200, - response_text="Success", - error_message=None - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None) + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_label(label_id=1) - + assert success is True assert error is None - - labels_handler.client.send_post.assert_called_once_with( - 
"delete_label/1" - ) + + labels_handler.client.send_post.assert_called_once_with("delete_label/1") def test_delete_labels_batch(self, labels_handler): """Test batch label deletion with multiple IDs""" - mock_response = APIClientResult( - status_code=200, - response_text="Success", - error_message=None - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None) + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[1, 2, 3]) - + assert success is True assert error is None - - labels_handler.client.send_post.assert_called_once_with( - "delete_labels", - payload=None, - files={"label_ids": (None, "[1, 2, 3]")} - ) + + labels_handler.client.send_post.assert_called_once_with("delete_labels", payload={"label_ids": [1, 2, 3]}) def test_delete_labels_api_error(self, labels_handler): """Test label deletion with API error""" mock_response = APIClientResult( - status_code=400, - response_text=None, - error_message="One or more labels not found" + status_code=400, response_text=None, error_message="One or more labels not found" ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[999, 1000]) - + assert success is False assert error == "One or more labels not found" def test_delete_labels_forbidden(self, labels_handler): """Test label deletion with forbidden access""" - mock_response = APIClientResult( - status_code=403, - response_text=None, - error_message="No access to the project" - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project") + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[1]) - + assert success is False - assert error == "No access to the project" + assert error == "No access to the project" class TestApiRequestHandlerLabelsCases: """Test cases for test case label operations""" - + def setup_method(self): """Set up test fixtures""" # Create proper objects like the existing fixture @@ -353,355 +275,333 @@ def setup_method(self): environment = Environment() environment.project = "Test Project" environment.batch_size = 10 - + # Create a minimal TestRailSuite for testing from trcli.data_classes.dataclass_testrail import TestRailSuite + test_suite = TestRailSuite(name="Test Suite") - + self.labels_handler = ApiRequestHandler(environment, api_client, test_suite, verify=False) - + def test_add_labels_to_cases_success(self): """Test successful addition of labels to test cases""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler, 'add_label') as mock_add_label, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ - patch.object(self.labels_handler.client, 'send_post') as mock_send_post: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler, 
"add_label") as mock_add_label, patch.object( + self.labels_handler.client, "send_get" + ) as mock_send_get, patch.object( + self.labels_handler.client, "send_post" + ) as mock_send_post: + # Mock __get_all_cases response (cases exist) - mock_get_cases.return_value = ([ - {"id": 1, "title": "Case 1", "suite_id": 1}, - {"id": 2, "title": "Case 2", "suite_id": 1} - ], "") - + mock_get_cases.return_value = ( + [{"id": 1, "title": "Case 1", "suite_id": 1}, {"id": 2, "title": "Case 2", "suite_id": 1}], + "", + ) + # Mock get_labels response (label doesn't exist) mock_get_labels.return_value = ({"labels": []}, "") - + # Mock add_label response (create new label) mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") - + # Mock get_case responses mock_send_get.side_effect = [ MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}), # Case 1 - MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}) # Case 2 + MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}), # Case 2 ] - + # Mock update_cases batch response (for multiple cases) mock_send_post.return_value = MagicMock(status_code=200) - + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1, 2], - title="test-label", - project_id=1 + case_ids=[1, 2], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 2 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 0 - assert len(results['case_not_found']) == 0 - + assert len(results["successful_cases"]) == 2 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 0 + assert len(results["case_not_found"]) == 0 + # Verify API calls - should be called twice: once for multi-suite detection, once for case validation assert mock_get_cases.call_count == 2 - mock_get_cases.assert_has_calls([ - call(1, None), # Multi-suite detection - call(1, None) # Case validation - ]) + mock_get_cases.assert_has_calls([call(1, None), call(1, None)]) # Multi-suite detection # Case validation mock_get_labels.assert_called_once_with(1) mock_add_label.assert_called_once_with(1, "test-label") assert mock_send_get.call_count == 2 # Should call update_cases/{suite_id} once for batch update - mock_send_post.assert_called_once_with("update_cases/1", payload={ - 'case_ids': [1, 2], - 'labels': [5] - }) - + mock_send_post.assert_called_once_with("update_cases/1", payload={"case_ids": [1, 2], "labels": [5]}) + def test_add_labels_to_cases_single_case(self): """Test adding labels to a single test case using update_case endpoint""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler, 'add_label') as mock_add_label, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ - patch.object(self.labels_handler.client, 'send_post') as mock_send_post: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object( + self.labels_handler.client, "send_get" + ) as mock_send_get, patch.object( + self.labels_handler.client, "send_post" + ) as 
mock_send_post: + # Mock __get_all_cases response (case exists) - mock_get_cases.return_value = ([ - {"id": 1, "title": "Case 1"} - ], "") - + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + # Mock get_labels response (label doesn't exist) mock_get_labels.return_value = ({"labels": []}, "") - + # Mock add_label response (create new label) mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") - + # Mock get_case response mock_send_get.return_value = MagicMock( - status_code=200, - response_text={"labels": [], "suite_id": 1, "title": "Case 1"} + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"} ) - + # Mock update_case response (for single case) mock_send_post.return_value = MagicMock(status_code=200) - + # Test the method with single case results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 1 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 0 - assert len(results['case_not_found']) == 0 - + assert len(results["successful_cases"]) == 1 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 0 + assert len(results["case_not_found"]) == 0 + # Verify API calls assert mock_get_cases.call_count == 2 mock_get_labels.assert_called_once_with(1) mock_add_label.assert_called_once_with(1, "test-label") assert mock_send_get.call_count == 1 # Should call update_case/{case_id} once for single case - mock_send_post.assert_called_once_with("update_case/1", payload={'labels': [5]}) + mock_send_post.assert_called_once_with("update_case/1", payload={"labels": [5]}) def test_add_labels_to_cases_existing_label(self): """Test adding labels when label already exists""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler, 'add_label') as mock_add_label, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ - patch.object(self.labels_handler.client, 'send_post') as mock_send_post: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler, "add_label") as mock_add_label, patch.object( + self.labels_handler.client, "send_get" + ) as mock_send_get, patch.object( + self.labels_handler.client, "send_post" + ) as mock_send_post: + # Mock __get_all_cases response (case exists) mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") - + # Mock get_labels response (label exists) mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") - + # Mock get_case response - mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}) - + mock_send_get.return_value = MagicMock( + status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"} + ) + # Mock add_label_to_case response mock_send_post.return_value = MagicMock(status_code=200) - + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 
) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 1 - assert len(results['case_not_found']) == 0 - + assert len(results["successful_cases"]) == 1 + assert len(results["case_not_found"]) == 0 + # Verify add_label was not called (label already exists) mock_add_label.assert_not_called() - + def test_add_labels_to_cases_max_labels_reached(self): """Test handling of maximum labels limit (10)""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get: + # Mock __get_all_cases response (case exists) mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") - + # Mock get_labels response mock_get_labels.return_value = ({"labels": [{"id": 15, "title": "test-label"}]}, "") - + # Mock get_case response with 10 existing labels (different from test-label) existing_labels = [{"id": i, "title": f"label-{i}"} for i in range(1, 11)] - mock_send_get.return_value = MagicMock( - status_code=200, - response_text={"labels": existing_labels} - ) - + mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": existing_labels}) + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 0 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 1 - assert len(results['case_not_found']) == 0 - assert results['max_labels_reached'][0] == 1 - + assert len(results["successful_cases"]) == 0 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 1 + assert len(results["case_not_found"]) == 0 + assert results["max_labels_reached"][0] == 1 + def test_add_labels_to_cases_label_already_on_case(self): """Test handling when label already exists on case""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get: + # Mock __get_all_cases response (case exists) mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") - + # Mock get_labels response mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") - + # Mock get_case response with the label already present mock_send_get.return_value = MagicMock( - status_code=200, - response_text={"labels": [{"id": 5, "title": "test-label"}]} + status_code=200, response_text={"labels": [{"id": 5, "title": "test-label"}]} ) - + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", 
project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 1 - assert len(results['case_not_found']) == 0 - assert "already exists" in results['successful_cases'][0]['message'] + assert len(results["successful_cases"]) == 1 + assert len(results["case_not_found"]) == 0 + assert "already exists" in results["successful_cases"][0]["message"] def test_add_labels_to_cases_case_not_found(self): """Test handling when case IDs don't exist""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases: + # Mock __get_all_cases response (no cases exist) mock_get_cases.return_value = ([], "") - + # Test the method with case IDs that don't exist results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[999, 1000, 1001], - title="test-label", - project_id=1 + case_ids=[999, 1000, 1001], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - all cases should be in case_not_found - assert len(results['case_not_found']) == 3 - assert 999 in results['case_not_found'] - assert 1000 in results['case_not_found'] - assert 1001 in results['case_not_found'] - + assert len(results["case_not_found"]) == 3 + assert 999 in results["case_not_found"] + assert 1000 in results["case_not_found"] + assert 1001 in results["case_not_found"] + # Verify that no other processing happened since no valid cases - assert len(results['successful_cases']) == 0 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 0 - + assert len(results["successful_cases"]) == 0 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 0 + def test_get_cases_by_label_with_label_ids(self): """Test getting cases by label IDs""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases: + # Mock cases response mock_cases = [ {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "label1"}]}, {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "label2"}]}, - {"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]} + {"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]}, ] mock_get_cases.return_value = (mock_cases, "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_ids=[5] + project_id=1, suite_id=None, label_ids=[5] ) - + # Verify no error assert error_message == "" - + # Verify results (should return cases 1 and 3) assert len(matching_cases) == 2 - assert matching_cases[0]['id'] == 1 - assert matching_cases[1]['id'] == 3 - + assert matching_cases[0]["id"] == 1 + assert matching_cases[1]["id"] == 3 + def test_get_cases_by_label_with_title(self): """Test getting cases by label title""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels: + # Mock labels response mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") - + # 
Mock cases response mock_cases = [ {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "test-label"}]}, - {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]} + {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]}, ] mock_get_cases.return_value = (mock_cases, "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_title="test-label" + project_id=1, suite_id=None, label_title="test-label" ) - + # Verify no error assert error_message == "" - + # Verify results (should return case 1) assert len(matching_cases) == 1 - assert matching_cases[0]['id'] == 1 - + assert matching_cases[0]["id"] == 1 + def test_get_cases_by_label_title_not_found(self): """Test getting cases by non-existent label title""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler, "get_labels" + ) as mock_get_labels: + # Mock labels response (no matching label) mock_get_labels.return_value = ({"labels": []}, "") - + # Mock get_all_cases to return empty (not called due to early return) mock_get_cases.return_value = ([], "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_title="non-existent-label" + project_id=1, suite_id=None, label_title="non-existent-label" ) - + # Verify error assert error_message == "" assert matching_cases == [] - + def test_get_cases_by_label_no_matching_cases(self): """Test getting cases when no cases have the specified label""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases: + # Mock cases response (no cases with target label) mock_cases = [ {"id": 1, "title": "Test Case 1", "labels": [{"id": 6, "title": "other-label"}]}, - {"id": 2, "title": "Test Case 2", "labels": []} + {"id": 2, "title": "Test Case 2", "labels": []}, ] mock_get_cases.return_value = (mock_cases, "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_ids=[5] + project_id=1, suite_id=None, label_ids=[5] ) - + # Verify no error but no results assert error_message == "" assert len(matching_cases) == 0 @@ -714,362 +614,279 @@ def test_add_labels_to_tests_success_single(self, labels_handler): """Test successful label addition to a single test""" # Mock test validation mock_test_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, error_message=None ) - + # Mock run validation mock_run_response = APIClientResult( - status_code=200, - response_text={"id": 1, "project_id": 1}, - error_message=None + status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None ) - + # Mock existing labels - mock_labels_response = APIClientResult( - status_code=200, - response_text={"labels": []}, - error_message=None - ) - + mock_labels_response = APIClientResult(status_code=200, response_text={"labels": []}, error_message=None) + # Mock label creation 
mock_add_label_response = APIClientResult( - status_code=200, - response_text={"id": 5, "title": "Test Label"}, - error_message=None + status_code=200, response_text={"id": 5, "title": "Test Label"}, error_message=None ) - + # Mock test update mock_update_response = APIClientResult( - status_code=200, - response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, - error_message=None + status_code=200, response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, error_message=None ) - - with patch.object(labels_handler.client, 'send_get') as mock_get, \ - patch.object(labels_handler.client, 'send_post') as mock_post: - + + with patch.object(labels_handler.client, "send_get") as mock_get, patch.object( + labels_handler.client, "send_post" + ) as mock_post: + # Setup get responses for validation and label retrieval mock_get.side_effect = [ mock_test_response, # get_test/{test_id} - mock_run_response, # get_run/{run_id} - mock_labels_response, # get_labels + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels mock_test_response, # get_test/{test_id} again for labels check ] - + # Setup post responses for label creation and test update - mock_post.side_effect = [ - mock_add_label_response, # add_label - mock_update_response # update_test - ] - - result, error = labels_handler.add_labels_to_tests( - test_ids=[1], - titles="Test Label", - project_id=1 - ) - + mock_post.side_effect = [mock_add_label_response, mock_update_response] # add_label # update_test + + result, error = labels_handler.add_labels_to_tests(test_ids=[1], titles="Test Label", project_id=1) + assert error == "" - assert len(result['successful_tests']) == 1 - assert len(result['failed_tests']) == 0 - assert len(result['test_not_found']) == 0 - assert len(result['max_labels_reached']) == 0 + assert len(result["successful_tests"]) == 1 + assert len(result["failed_tests"]) == 0 + assert len(result["test_not_found"]) == 0 + assert len(result["max_labels_reached"]) == 0 def test_add_labels_to_tests_test_not_found(self, labels_handler): """Test handling of non-existent test IDs""" # Mock test not found - mock_test_response = APIClientResult( - status_code=404, - response_text=None, - error_message="Test not found" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): - result, error = labels_handler.add_labels_to_tests( - test_ids=[999], - titles="Test Label", - project_id=1 - ) - + mock_test_response = APIClientResult(status_code=404, response_text=None, error_message="Test not found") + + with patch.object(labels_handler.client, "send_get", return_value=mock_test_response): + result, error = labels_handler.add_labels_to_tests(test_ids=[999], titles="Test Label", project_id=1) + assert error == "" - assert len(result['test_not_found']) == 1 - assert 999 in result['test_not_found'] + assert len(result["test_not_found"]) == 1 + assert 999 in result["test_not_found"] def test_add_labels_to_tests_max_labels_reached(self, labels_handler): """Test handling of tests that already have maximum labels""" # Create 10 existing labels existing_labels = [{"id": i, "title": f"Label {i}"} for i in range(1, 11)] - + # Mock test with max labels mock_test_response = APIClientResult( status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": existing_labels}, - error_message=None + error_message=None, ) - + # Mock run validation mock_run_response = APIClientResult( - status_code=200, - response_text={"id": 1, "project_id": 1}, - error_message=None 
+ status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None ) - + # Mock existing labels - mock_labels_response = APIClientResult( - status_code=200, - response_text={"labels": []}, - error_message=None - ) - + mock_labels_response = APIClientResult(status_code=200, response_text={"labels": []}, error_message=None) + # Mock label creation mock_add_label_response = APIClientResult( - status_code=200, - response_text={"id": 11, "title": "New Label"}, - error_message=None + status_code=200, response_text={"id": 11, "title": "New Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_get') as mock_get, \ - patch.object(labels_handler.client, 'send_post') as mock_post: - + + with patch.object(labels_handler.client, "send_get") as mock_get, patch.object( + labels_handler.client, "send_post" + ) as mock_post: + mock_get.side_effect = [ - mock_test_response, # get_test/{test_id} - mock_run_response, # get_run/{run_id} - mock_labels_response, # get_labels - mock_test_response, # get_test/{test_id} again for labels check + mock_test_response, # get_test/{test_id} + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels + mock_test_response, # get_test/{test_id} again for labels check ] - + mock_post.return_value = mock_add_label_response - - result, error = labels_handler.add_labels_to_tests( - test_ids=[1], - titles="New Label", - project_id=1 - ) - + + result, error = labels_handler.add_labels_to_tests(test_ids=[1], titles="New Label", project_id=1) + assert error == "" - assert len(result['max_labels_reached']) == 1 - assert 1 in result['max_labels_reached'] + assert len(result["max_labels_reached"]) == 1 + assert 1 in result["max_labels_reached"] def test_get_tests_by_label_success(self, labels_handler): """Test successful retrieval of tests by label""" # Mock runs response mock_runs_response = APIClientResult( - status_code=200, - response_text={"runs": [{"id": 1}, {"id": 2}]}, - error_message=None + status_code=200, response_text={"runs": [{"id": 1}, {"id": 2}]}, error_message=None ) - + # Mock tests responses for each run mock_tests_response_run1 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}, - {"id": 2, "title": "Test 2", "labels": []} - ]}, - error_message=None + response_text={ + "tests": [ + {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}, + {"id": 2, "title": "Test 2", "labels": []}, + ] + }, + error_message=None, ) - + mock_tests_response_run2 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]} - ]}, - error_message=None + response_text={"tests": [{"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]}]}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get') as mock_get: + + with patch.object(labels_handler.client, "send_get") as mock_get: mock_get.side_effect = [ - mock_runs_response, # get_runs/{project_id} + mock_runs_response, # get_runs/{project_id} mock_tests_response_run1, # get_tests/{run_id} for run 1 - mock_tests_response_run2 # get_tests/{run_id} for run 2 + mock_tests_response_run2, # get_tests/{run_id} for run 2 ] - - result, error = labels_handler.get_tests_by_label( - project_id=1, - label_ids=[5] - ) - + + result, error = labels_handler.get_tests_by_label(project_id=1, label_ids=[5]) + assert error == "" assert len(result) == 2 - assert result[0]['id'] 
== 1 - assert result[1]['id'] == 3 + assert result[0]["id"] == 1 + assert result[1]["id"] == 3 def test_get_tests_by_label_with_run_ids(self, labels_handler): """Test retrieval of tests by label filtered by specific run IDs""" # Mock run responses for specific run IDs mock_run_response_1 = APIClientResult( - status_code=200, - response_text={"id": 1, "name": "Test Run 1"}, - error_message=None + status_code=200, response_text={"id": 1, "name": "Test Run 1"}, error_message=None ) - + mock_run_response_2 = APIClientResult( - status_code=200, - response_text={"id": 2, "name": "Test Run 2"}, - error_message=None + status_code=200, response_text={"id": 2, "name": "Test Run 2"}, error_message=None ) - + # Mock tests responses for each run mock_tests_response_run1 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]} - ]}, - error_message=None + response_text={"tests": [{"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}]}, + error_message=None, ) - + mock_tests_response_run2 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]} - ]}, - error_message=None + response_text={"tests": [{"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]}]}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get') as mock_get: + + with patch.object(labels_handler.client, "send_get") as mock_get: mock_get.side_effect = [ - mock_run_response_1, # get_run/1 - mock_run_response_2, # get_run/2 + mock_run_response_1, # get_run/1 + mock_run_response_2, # get_run/2 mock_tests_response_run1, # get_tests/1 - mock_tests_response_run2 # get_tests/2 + mock_tests_response_run2, # get_tests/2 ] - - result, error = labels_handler.get_tests_by_label( - project_id=1, - label_ids=[5], - run_ids=[1, 2] - ) - + + result, error = labels_handler.get_tests_by_label(project_id=1, label_ids=[5], run_ids=[1, 2]) + assert error == "" assert len(result) == 2 - assert result[0]['id'] == 1 - assert result[1]['id'] == 2 + assert result[0]["id"] == 1 + assert result[1]["id"] == 2 def test_get_test_labels_success(self, labels_handler): """Test successful retrieval of test labels""" # Mock test responses mock_test_response1 = APIClientResult( status_code=200, - response_text={ - "id": 1, - "title": "Test 1", - "status_id": 1, - "labels": [{"id": 5, "title": "Test Label"}] - }, - error_message=None + response_text={"id": 1, "title": "Test 1", "status_id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, + error_message=None, ) - + mock_test_response2 = APIClientResult( status_code=200, - response_text={ - "id": 2, - "title": "Test 2", - "status_id": 2, - "labels": [] - }, - error_message=None + response_text={"id": 2, "title": "Test 2", "status_id": 2, "labels": []}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get') as mock_get: + + with patch.object(labels_handler.client, "send_get") as mock_get: mock_get.side_effect = [mock_test_response1, mock_test_response2] - + result, error = labels_handler.get_test_labels([1, 2]) - + assert error == "" assert len(result) == 2 - + # Check first test - assert result[0]['test_id'] == 1 - assert result[0]['title'] == "Test 1" - assert result[0]['status_id'] == 1 - assert len(result[0]['labels']) == 1 - assert result[0]['labels'][0]['title'] == "Test Label" - assert result[0]['error'] is None - + assert result[0]["test_id"] == 1 + assert 
result[0]["title"] == "Test 1" + assert result[0]["status_id"] == 1 + assert len(result[0]["labels"]) == 1 + assert result[0]["labels"][0]["title"] == "Test Label" + assert result[0]["error"] is None + # Check second test - assert result[1]['test_id'] == 2 - assert result[1]['title'] == "Test 2" - assert result[1]['status_id'] == 2 - assert len(result[1]['labels']) == 0 - assert result[1]['error'] is None + assert result[1]["test_id"] == 2 + assert result[1]["title"] == "Test 2" + assert result[1]["status_id"] == 2 + assert len(result[1]["labels"]) == 0 + assert result[1]["error"] is None def test_get_test_labels_test_not_found(self, labels_handler): """Test handling of non-existent test IDs in get_test_labels""" # Mock test not found - mock_test_response = APIClientResult( - status_code=404, - response_text=None, - error_message="Test not found" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): + mock_test_response = APIClientResult(status_code=404, response_text=None, error_message="Test not found") + + with patch.object(labels_handler.client, "send_get", return_value=mock_test_response): result, error = labels_handler.get_test_labels([999]) - + assert error == "" assert len(result) == 1 - assert result[0]['test_id'] == 999 - assert result[0]['error'] == "Test 999 not found or inaccessible" - assert result[0]['labels'] == [] + assert result[0]["test_id"] == 999 + assert result[0]["error"] == "Test 999 not found or inaccessible" + assert result[0]["labels"] == [] def test_add_labels_to_tests_batch_update(self, labels_handler): """Test batch update of multiple tests""" # Mock test validation for multiple tests mock_test_response1 = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, error_message=None ) - + mock_test_response2 = APIClientResult( - status_code=200, - response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, - error_message=None + status_code=200, response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, error_message=None ) - + # Mock run validation mock_run_response = APIClientResult( - status_code=200, - response_text={"id": 1, "project_id": 1}, - error_message=None + status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None ) - + # Mock existing labels mock_labels_response = APIClientResult( - status_code=200, - response_text={"labels": [{"id": 5, "title": "Test Label"}]}, - error_message=None + status_code=200, response_text={"labels": [{"id": 5, "title": "Test Label"}]}, error_message=None ) - + # Mock batch update - mock_batch_response = APIClientResult( - status_code=200, - response_text={"updated": 2}, - error_message=None - ) - - with patch.object(labels_handler.client, 'send_get') as mock_get, \ - patch.object(labels_handler.client, 'send_post') as mock_post: - + mock_batch_response = APIClientResult(status_code=200, response_text={"updated": 2}, error_message=None) + + with patch.object(labels_handler.client, "send_get") as mock_get, patch.object( + labels_handler.client, "send_post" + ) as mock_post: + # Setup get responses mock_get.side_effect = [ mock_test_response1, # get_test/1 - mock_run_response, # get_run/1 + mock_run_response, # get_run/1 mock_test_response2, # get_test/2 - mock_run_response, # get_run/1 - mock_labels_response, # get_labels + mock_run_response, # get_run/1 + mock_labels_response, # 
get_labels mock_test_response1, # get_test/1 for labels check mock_test_response2, # get_test/2 for labels check ] - + # Setup batch update response mock_post.return_value = mock_batch_response - - result, error = labels_handler.add_labels_to_tests( - test_ids=[1, 2], - titles="Test Label", - project_id=1 - ) - + + result, error = labels_handler.add_labels_to_tests(test_ids=[1, 2], titles="Test Label", project_id=1) + assert error == "" - assert len(result['successful_tests']) == 2 \ No newline at end of file + assert len(result["successful_tests"]) == 2 diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 2879972..39481d9 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -26,7 +26,7 @@ def _run_cmd(multiline_cmd: str): process = subprocess.Popen(single_line_cmd, shell=True, stdout=subprocess.PIPE) with process.stdout: output = "" - for line in iter(process.stdout.readline, b''): + for line in iter(process.stdout.readline, b""): output += line.decode() print(output) process.wait() @@ -57,7 +57,7 @@ def _run_cmd_allow_failure(multiline_cmd: str): process = subprocess.Popen(single_line_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) with process.stdout: output = "" - for line in iter(process.stdout.readline, b''): + for line in iter(process.stdout.readline, b""): output += line.decode() print(output) process.wait() @@ -69,53 +69,58 @@ class TestsEndToEnd: # TestRail 101 instance has the required configuration for this test run TR_INSTANCE = "https://testrail101.testrail.io/" # Uncomment and enter your credentials below in order to execute the tests locally - #os.environ.setdefault("TR_CLI_USERNAME", "") - #os.environ.setdefault("TR_CLI_PASSWORD", "") + # os.environ.setdefault("TR_CLI_USERNAME", "") + # os.environ.setdefault("TR_CLI_PASSWORD", "") @pytest.fixture(autouse=True, scope="module") def install_trcli(self): _run_cmd("cd .. && pip install .") def test_cli_robot_report_RF50(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_robot \\ --title "[CLI-E2E-Tests] ROBOT FRAMEWORK PARSER" \\ -f "reports_robot/simple_report_RF50.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) def test_cli_robot_report_RF70(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_robot \\ --title "[CLI-E2E-Tests] ROBOT FRAMEWORK PARSER" \\ -f "reports_robot/simple_report_RF50.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) def test_cli_plan_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -123,19 +128,21 @@ def test_cli_plan_id(self): --plan-id 1578 \\ --title "[CLI-E2E-Tests] With Plan ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_plan_id_and_config_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -144,19 +151,21 @@ def test_cli_plan_id_and_config_id(self): --config-ids 142,143 \\ --title "[CLI-E2E-Tests] With Plan ID and Config ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_update_run_in_plan(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -164,19 +173,21 @@ def test_cli_update_run_in_plan(self): --run-id 1550 \\ --title "[CLI-E2E-Tests] Update Run in Plan" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) - + def test_cli_update_run_in_plan_with_configs(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -184,38 +195,42 @@ def test_cli_update_run_in_plan_with_configs(self): --run-id 1551 \\ --title "[CLI-E2E-Tests] Update Run in Plan with Configs" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Matcher: AUTO" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto_update_run(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -224,38 +239,42 @@ def test_cli_matchers_auto_update_run(self): --run-id "1568" \\ --milestone-id "107" \\ -f "reports_junit/generic_ids_auto_plus_one.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto_multiple_files(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Matcher: AUTO with multiple files" \\ -f "reports_junit/junit_multiple_parts_*" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [MULTIPART-REPORT-2]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 4 test results in" - ] + "Submitted 4 test results in", + ], ) - + def test_cli_matchers_name(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -n \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -263,7 +282,8 @@ def test_cli_matchers_name(self): --title "[CLI-E2E-Tests] Matcher: NAME" \\ --case-matcher "NAME" \\ -f "reports_junit/generic_ids_name.xml" - """) + """ + ) _assert_contains( output, [ @@ -271,12 +291,13 @@ def test_cli_matchers_name(self): "Found 3 test cases without case ID in the report file.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def test_cli_matchers_property(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -n \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -284,7 +305,8 @@ def test_cli_matchers_property(self): --title "[CLI-E2E-Tests] Matcher: PROPERTY" \\ --case-matcher "PROPERTY" \\ -f "reports_junit/generic_ids_property.xml" - """) + """ + ) _assert_contains( output, [ @@ -292,30 +314,34 @@ def test_cli_matchers_property(self): "Found 3 test cases without case ID in the report file.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def test_cli_attachments(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Attachments test" \\ -f "reports_junit/attachments.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [ATTACHMENTS]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 4 attachments for 2 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) + def test_cli_multisuite_with_suite_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ @@ -323,7 +349,8 @@ def test_cli_multisuite_with_suite_id(self): --title "[CLI-E2E-Tests] Multisuite with suite id" \\ --suite-id 128 \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -333,12 +360,13 @@ def test_cli_multisuite_with_suite_id(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) def test_cli_multisuite_with_suite_name(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ @@ -346,7 +374,8 @@ def test_cli_multisuite_with_suite_name(self): --suite-name "My suite" \\ --title "[CLI-E2E-Tests] Multisuite without suite id" \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -356,19 +385,21 @@ def test_cli_multisuite_with_suite_name(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) def test_cli_multisuite_without_suite_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ parse_junit \\ --title "[CLI-E2E-Tests] Multisuite without suite id" \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -378,12 +409,13 @@ def test_cli_multisuite_without_suite_id(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) - + def test_cli_saucelabs(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -391,7 +423,8 @@ def test_cli_saucelabs(self): --title "[CLI-E2E-Tests] saucectl parser" \\ --special-parser "saucectl" \\ -f "reports_junit/saucelabs.xml" - """) + """ + ) _assert_contains( output, [ @@ -399,109 +432,114 @@ def test_cli_saucelabs(self): "Processing JUnit suite - Firefox", "Processing JUnit suite - Chrome", "Processed 1 test cases in section [SAUCELABS]", - f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view" - ] + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + ], ) - + def test_cli_openapi(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_openapi \\ -f "openapi_specs/openapi.yml" - """) - _assert_contains( - output, - [ - "Processed 22 test cases based on possible responses.", - "Submitted 22 test cases" - ] + """ ) + _assert_contains(output, ["Processed 22 test cases based on possible responses.", "Submitted 22 test cases"]) def test_cli_add_run(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Create run_config.yml" \\ -f "run_config.yml" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", "title: [CLI-E2E-Tests] ADD RUN TEST: Create run_config.yml", - "Writing test run data to file (run_config.yml). Done." - ] + "Writing test run data to file (run_config.yml). 
Done.", + ], ) - + def test_cli_add_run_include_all(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run --run-include-all\\ --title "[CLI-E2E-Tests] ADD RUN TEST: Include All Cases" \\ -f "run_config.yml" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", "title: [CLI-E2E-Tests] ADD RUN TEST: Include All Cases", - "Writing test run data to file (run_config.yml). Done." - ] + "Writing test run data to file (run_config.yml). Done.", + ], ) def test_cli_add_run_upload_results(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ -c run_config.yml \\ parse_junit \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results" - ] + "Submitted 6 test results", + ], ) - + def test_cli_add_run_and_plan_with_due_date(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run --run-include-all \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date" \\ --run-start-date "03/01/2030" --run-end-date "03/12/2030" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", - "title: [CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date" - ] + "title: [CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date", + ], ) def test_cli_add_run_refs_with_references(self): """Test creating a run with references""" import random import string - + # Generate random suffix to avoid conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - - output = _run_cmd(f""" + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -509,7 +547,8 @@ def test_cli_add_run_refs_with_references(self): --title "[CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}" \\ --run-refs "JIRA-100,JIRA-200,REQ-{random_suffix}" \\ -f "run_config_refs.yml" - """) + """ + ) _assert_contains( output, [ @@ -517,32 +556,32 @@ def test_cli_add_run_refs_with_references(self): f"Test run: {self.TR_INSTANCE}index.php?/runs/view", f"title: [CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}", f"Refs: JIRA-100,JIRA-200,REQ-{random_suffix}", - "Writing test run data to file (run_config_refs.yml). Done." - ] + "Writing test run data to file (run_config_refs.yml). 
Done.", + ], ) def test_cli_add_run_refs_validation_error(self): """Test references validation (too long)""" long_refs = "A" * 251 # Exceeds 250 character limit - - output, return_code = _run_cmd_allow_failure(f""" + + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Too Long" \\ --run-refs "{long_refs}" - """) - - assert return_code != 0 - _assert_contains( - output, - ["Error: References field cannot exceed 250 characters."] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: References field cannot exceed 250 characters."]) + def test_cli_add_run_refs_update_action_validation(self): """Test that update/delete actions require run_id""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -550,12 +589,15 @@ def test_cli_add_run_refs_update_action_validation(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Invalid Action" \\ --run-refs "JIRA-123" \\ --run-refs-action "update" - """) - + """ + ) + assert return_code != 0 _assert_contains( output, - ["Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)."] + [ + "Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)." + ], ) def test_cli_add_run_refs_update_workflow(self): @@ -563,12 +605,13 @@ def test_cli_add_run_refs_update_workflow(self): import random import string import re - + # Generate random suffix to avoid conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Step 1: Create a run with initial references - create_output = _run_cmd(f""" + create_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -576,24 +619,19 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "JIRA-100,JIRA-200" \\ -f "run_config_workflow.yml" - """) - + """ + ) + # Extract run ID from output - run_id_match = re.search(r'run_id: (\d+)', create_output) + run_id_match = re.search(r"run_id: (\d+)", create_output) assert run_id_match, "Could not extract run ID from output" run_id = run_id_match.group(1) - - _assert_contains( - create_output, - [ - "Creating test run.", - f"run_id: {run_id}", - "Refs: JIRA-100,JIRA-200" - ] - ) - + + _assert_contains(create_output, ["Creating test run.", f"run_id: {run_id}", "Refs: JIRA-100,JIRA-200"]) + # Step 2: Add more references to the existing run - add_output = _run_cmd(f""" + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -602,19 +640,14 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "JIRA-300,REQ-{random_suffix}" \\ --run-refs-action "add" - """) - - _assert_contains( - add_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs Action: add" - ] + """ ) - + + _assert_contains(add_output, ["Updating test run.", f"run_id: {run_id}", "Refs Action: add"]) + # Step 3: Update (replace) all references - update_output = _run_cmd(f""" + update_output = 
_run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -623,20 +656,16 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "NEW-100,NEW-200" \\ --run-refs-action "update" - """) - + """ + ) + _assert_contains( - update_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs: NEW-100,NEW-200", - "Refs Action: update" - ] + update_output, ["Updating test run.", f"run_id: {run_id}", "Refs: NEW-100,NEW-200", "Refs Action: update"] ) - + # Step 4: Delete specific references - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -645,19 +674,14 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "NEW-100" \\ --run-refs-action "delete" - """) - - _assert_contains( - delete_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs Action: delete" - ] + """ ) - + + _assert_contains(delete_output, ["Updating test run.", f"run_id: {run_id}", "Refs Action: delete"]) + # Step 5: Delete all references - delete_all_output = _run_cmd(f""" + delete_all_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -665,21 +689,16 @@ def test_cli_add_run_refs_update_workflow(self): --run-id {run_id} \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs-action "delete" - """) - - _assert_contains( - delete_all_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs: ", - "Refs Action: delete" - ] + """ ) + _assert_contains( + delete_all_output, ["Updating test run.", f"run_id: {run_id}", "Refs: ", "Refs Action: delete"] + ) def bug_test_cli_robot_description_bug(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -687,18 +706,20 @@ def bug_test_cli_robot_description_bug(self): --title "[CLI-E2E-Tests] RUN DESCRIPTION BUG" \\ -f "reports_robot/simple_report_RF50.xml" \\ --run-id 2332 - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def bug_test_automation_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -706,225 +727,221 @@ def bug_test_automation_id(self): parse_junit \\ --title "(DO NOT DELETE) [CLI-E2E-Tests] Test updated Automation ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ f"Updating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results" - ] + "Submitted 6 test results", + ], ) def test_labels_full_workflow(self): """Test complete labels workflow: add, list, get, update, delete""" - + # Generate random suffix to avoid conflicts with existing labels import random import string - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) label_title = f"e2e-{random_suffix}" assert len(label_title) <= 20, f"Label title '{label_title}' exceeds 20 characters" - + # Step 1: Add a new label - add_output = _run_cmd(f""" + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label_title}" - """) + """ + ) _assert_contains( - add_output, - [ - f"Adding label '{label_title}'...", - "Successfully added label: ID=", - f"Title='{label_title}'" - ] + add_output, [f"Adding label '{label_title}'...", "Successfully added label: ID=", f"Title='{label_title}'"] ) - + # Extract label ID from the add output import re + label_id_match = re.search(r"ID=(\d+)", add_output) assert label_id_match, f"Could not find label ID in output: {add_output}" label_id = label_id_match.group(1) print(f"Created label with ID: {label_id}") - + # Step 2: List labels to verify it exists - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) - _assert_contains( - list_output, - [ - "Retrieving labels...", - "Found", - f"ID: {label_id}, Title: '{label_title}'" - ] + """ ) - + _assert_contains(list_output, ["Retrieving labels...", "Found", f"ID: {label_id}, Title: '{label_title}'"]) + # Step 3: Get the specific label - get_output = _run_cmd(f""" + get_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id {label_id} - """) + """ + ) _assert_contains( get_output, - [ - f"Retrieving label with ID {label_id}...", - "Label details:", - f"ID: {label_id}", - f"Title: '{label_title}'" - ] + [f"Retrieving label with ID {label_id}...", "Label details:", f"ID: {label_id}", f"Title: '{label_title}'"], ) - + # Step 4: Update the label updated_title = f"upd-{random_suffix}" assert len(updated_title) <= 20, f"Updated title '{updated_title}' exceeds 20 characters" - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels update \\ --id {label_id} \\ --title "{updated_title}" - """) + """ + ) _assert_contains( update_output, [ f"Updating label with ID {label_id}...", f"Successfully updated label: ID={label_id}", - f"Title='{updated_title}'" - ] + f"Title='{updated_title}'", + ], ) - + # Step 5: Verify the update by getting the label again - get_updated_output = _run_cmd(f""" + get_updated_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id {label_id} - """) - _assert_contains( - get_updated_output, - [ - f"ID: {label_id}", - f"Title: '{updated_title}'" - ] + """ ) - + _assert_contains(get_updated_output, [f"ID: {label_id}", f"Title: '{updated_title}'"]) + # Step 6: Delete the label (with confirmation) - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli 
-y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) - _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + """ ) + _assert_contains(delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"]) def test_labels_add_multiple_and_delete_multiple(self): """Test adding multiple labels and deleting them in batch""" - + # Generate random suffix to avoid conflicts with existing labels import random import string - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Add first label label1_title = f"b1-{random_suffix}" assert len(label1_title) <= 20, f"Label1 title '{label1_title}' exceeds 20 characters" - add_output1 = _run_cmd(f""" + add_output1 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label1_title}" - """) - + """ + ) + # Add second label label2_title = f"b2-{random_suffix}" assert len(label2_title) <= 20, f"Label2 title '{label2_title}' exceeds 20 characters" - add_output2 = _run_cmd(f""" + add_output2 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label2_title}" - """) - + """ + ) + # Add third label label3_title = f"b3-{random_suffix}" assert len(label3_title) <= 20, f"Label3 title '{label3_title}' exceeds 20 characters" - add_output3 = _run_cmd(f""" + add_output3 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label3_title}" - """) - + """ + ) + # Extract all label IDs import re + label_id1 = re.search(r"ID=(\d+)", add_output1).group(1) label_id2 = re.search(r"ID=(\d+)", add_output2).group(1) label_id3 = re.search(r"ID=(\d+)", add_output3).group(1) - + label_ids = f"{label_id1},{label_id2},{label_id3}" print(f"Created labels with IDs: {label_ids}") - + # Verify all labels exist in list - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) + """ + ) _assert_contains( list_output, [ f"ID: {label_id1}, Title: '{label1_title}'", f"ID: {label_id2}, Title: '{label2_title}'", - f"ID: {label_id3}, Title: '{label3_title}'" - ] + f"ID: {label_id3}, Title: '{label3_title}'", + ], ) - + # Delete all labels in batch - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_ids} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_ids}...", - "Successfully deleted 3 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_ids}...", "Successfully deleted 3 label(s)"] ) def test_labels_help_commands(self): """Test labels help functionality""" - + # Test main labels help - main_help_output = _run_cmd(f""" + main_help_output = _run_cmd( + f""" trcli labels --help - """) + """ + ) _assert_contains( main_help_output, [ @@ -933,27 +950,26 @@ def test_labels_help_commands(self): "delete Delete labels from TestRail", "get Get a specific label by ID", "list List all labels in the project", - "update Update an existing label in TestRail" - ] + "update Update 
an existing label in TestRail", + ], ) - + # Test add command help - add_help_output = _run_cmd(f""" + add_help_output = _run_cmd( + f""" trcli labels add --help - """) + """ + ) _assert_contains( - add_help_output, - [ - "Add a new label in TestRail", - "--title", - "Title of the label to add (max 20 characters)" - ] + add_help_output, ["Add a new label in TestRail", "--title", "Title of the label to add (max 20 characters)"] ) - + # Test update command help - update_help_output = _run_cmd(f""" + update_help_output = _run_cmd( + f""" trcli labels update --help - """) + """ + ) _assert_contains( update_help_output, [ @@ -961,345 +977,313 @@ def test_labels_help_commands(self): "--id", "--title", "ID of the label to update", - "New title for the label (max 20 characters)" - ] + "New title for the label (max 20 characters)", + ], ) - + # Test delete command help - delete_help_output = _run_cmd(f""" + delete_help_output = _run_cmd( + f""" trcli labels delete --help - """) + """ + ) _assert_contains( - delete_help_output, - [ - "Delete labels from TestRail", - "--ids", - "Comma-separated list of label IDs to delete" - ] + delete_help_output, ["Delete labels from TestRail", "--ids", "Comma-separated list of label IDs to delete"] ) - + # Test list command help - list_help_output = _run_cmd(f""" + list_help_output = _run_cmd( + f""" trcli labels list --help - """) + """ + ) _assert_contains( list_help_output, - [ - "List all labels in the project", - "--offset", - "--limit", - "Offset for pagination", - "Limit for pagination" - ] - ) - + ["List all labels in the project", "--offset", "--limit", "Offset for pagination", "Limit for pagination"], + ) + # Test get command help - get_help_output = _run_cmd(f""" + get_help_output = _run_cmd( + f""" trcli labels get --help - """) - _assert_contains( - get_help_output, - [ - "Get a specific label by ID", - "--id", - "ID of the label to retrieve" - ] + """ ) + _assert_contains(get_help_output, ["Get a specific label by ID", "--id", "ID of the label to retrieve"]) def test_labels_pagination(self): """Test labels pagination functionality""" - + # Test basic list command - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) - _assert_contains( - list_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) - + _assert_contains(list_output, ["Retrieving labels...", "Found"]) + # Test pagination with limit - paginated_output = _run_cmd(f""" + paginated_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list \\ --limit 5 - """) - _assert_contains( - paginated_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) - + _assert_contains(paginated_output, ["Retrieving labels...", "Found"]) + # Test pagination with offset and limit - offset_output = _run_cmd(f""" + offset_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list \\ --offset 0 \\ --limit 10 - """) - _assert_contains( - offset_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) + _assert_contains(offset_output, ["Retrieving labels...", "Found"]) def test_labels_validation_errors(self): """Test labels validation and error handling""" - + # Test title too long (more than 20 characters) - long_title_output, returncode = _run_cmd_allow_failure(f""" + long_title_output, returncode = _run_cmd_allow_failure( + f""" trcli -y \\ -h 
{self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "ThisTitleIsWayTooLongForTheValidationLimit" - """) + """ + ) # Should fail with validation error assert returncode != 0, f"Expected validation error but command succeeded: {long_title_output}" assert "Error: Label title must be 20 characters or less." in long_title_output - + # Test invalid label ID for get - invalid_get_output, returncode = _run_cmd_allow_failure(f""" + invalid_get_output, returncode = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id 999999 - """) + """ + ) # Should fail with API error assert returncode != 0, f"Expected API error but command succeeded: {invalid_get_output}" assert "Failed to retrieve label:" in invalid_get_output - + # Test invalid label ID format for delete - invalid_delete_output, returncode = _run_cmd_allow_failure(f""" + invalid_delete_output, returncode = _run_cmd_allow_failure( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids "abc,def" - """) + """ + ) # Should fail with format validation error assert returncode != 0, f"Expected validation error but command succeeded: {invalid_delete_output}" assert "Error: Invalid label IDs format" in invalid_delete_output def test_labels_edge_cases(self): """Test labels edge cases and boundary conditions""" - + # Test with exactly 20 character title (boundary condition) twenty_char_title = "ExactlyTwentyCharss!" # Exactly 20 characters assert len(twenty_char_title) == 20, "Test title should be exactly 20 characters" - - add_output = _run_cmd(f""" + + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{twenty_char_title}" - """) - _assert_contains( - add_output, - [ - f"Adding label '{twenty_char_title}'...", - "Successfully added label:" - ] + """ ) - + _assert_contains(add_output, [f"Adding label '{twenty_char_title}'...", "Successfully added label:"]) + # Extract label ID for cleanup import re + label_id_match = re.search(r"ID=(\d+)", add_output) if label_id_match: label_id = label_id_match.group(1) - + # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"] ) - def test_labels_cases_full_workflow(self): """Test complete workflow of test case label operations""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) case_label_title = f"e2e-case-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{case_label_title}" - """) - _assert_contains( - add_label_output, - [ - f"Adding label '{case_label_title}'...", - "Successfully added label:" - ] + """ ) - + 
_assert_contains(add_label_output, [f"Adding label '{case_label_title}'...", "Successfully added label:"]) + # Extract label ID for later use import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) - + try: # Use known test case IDs that should exist in the test project test_case_ids = ["24964", "24965"] # Multiple test cases for batch testing - + # Add labels to test cases - add_cases_output = _run_cmd(f""" + add_cases_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{','.join(test_case_ids)}" \\ --title "{case_label_title}" - """) + """ + ) _assert_contains( add_cases_output, [ f"Adding label '{case_label_title}' to {len(test_case_ids)} test case(s)...", - "Successfully processed" - ] + "Successfully processed", + ], ) - + # List test cases by label title - list_by_title_output = _run_cmd(f""" + list_by_title_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "{case_label_title}" - """) + """ + ) _assert_contains( list_by_title_output, - [ - f"Retrieving test cases with label title '{case_label_title}'...", - "matching test case(s):" - ] + [f"Retrieving test cases with label title '{case_label_title}'...", "matching test case(s):"], ) - + # List test cases by label ID - list_by_id_output = _run_cmd(f""" + list_by_id_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --ids "{label_id}" - """) + """ + ) _assert_contains( - list_by_id_output, - [ - f"Retrieving test cases with label IDs: {label_id}...", - "matching test case(s):" - ] + list_by_id_output, [f"Retrieving test cases with label IDs: {label_id}...", "matching test case(s):"] ) - + finally: # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"] ) def test_labels_cases_validation_errors(self): """Test validation errors for test case label commands""" # Test title too long for add cases - long_title_output, return_code = _run_cmd_allow_failure(f""" + long_title_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "1" \\ --title "this-title-is-way-too-long-for-testrail" - """) - assert return_code != 0 - _assert_contains( - long_title_output, - ["Error: Label title must be 20 characters or less."] + """ ) - + assert return_code != 0 + _assert_contains(long_title_output, ["Error: Label title must be 20 characters or less."]) + # Test invalid case IDs format - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "invalid,ids" \\ --title "test" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: 
Invalid case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid case IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) - + # Test missing filter for list cases - no_filter_output, return_code = _run_cmd_allow_failure(f""" + no_filter_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list - """) - assert return_code != 0 - _assert_contains( - no_filter_output, - ["Error: Either --ids or --title must be provided."] + """ ) - + assert return_code != 0 + _assert_contains(no_filter_output, ["Error: Either --ids or --title must be provided."]) + # Test title too long for list cases - long_title_list_output, return_code = _run_cmd_allow_failure(f""" + long_title_list_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "this-title-is-way-too-long-for-testrail" - """) - assert return_code != 0 - _assert_contains( - long_title_list_output, - ["Error: Label title must be 20 characters or less."] + """ ) + assert return_code != 0 + _assert_contains(long_title_list_output, ["Error: Label title must be 20 characters or less."]) def test_labels_cases_help_commands(self): """Test help output for test case label commands""" @@ -1311,22 +1295,17 @@ def test_labels_cases_help_commands(self): "Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]...", "Manage labels for test cases", "add Add a label to test cases", - "list List test cases filtered by label ID or title" - ] + "list List test cases filtered by label ID or title", + ], ) - + # Test cases add help cases_add_help_output = _run_cmd("trcli labels cases add --help") _assert_contains( cases_add_help_output, - [ - "Usage: trcli labels cases add [OPTIONS]", - "Add a label to test cases", - "--case-ids", - "--title" - ] + ["Usage: trcli labels cases add [OPTIONS]", "Add a label to test cases", "--case-ids", "--title"], ) - + # Test cases list help cases_list_help_output = _run_cmd("trcli labels cases list --help") _assert_contains( @@ -1335,73 +1314,76 @@ def test_labels_cases_help_commands(self): "Usage: trcli labels cases list [OPTIONS]", "List test cases filtered by label ID or title", "--ids", - "--title" - ] + "--title", + ], ) def test_labels_cases_no_matching_cases(self): """Test behavior when no test cases match the specified label""" # Test with non-existent label title - no_match_output = _run_cmd(f""" + no_match_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "non-existent-label" - """) + """ + ) _assert_contains( no_match_output, [ "Retrieving test cases with label title 'non-existent-label'...", "Found 0 matching test case(s):", - "No test cases found with label title 'non-existent-label'." - ] + "No test cases found with label title 'non-existent-label'.", + ], ) - + # Test with non-existent label ID - no_match_id_output = _run_cmd(f""" + no_match_id_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --ids "99999" - """) + """ + ) _assert_contains( no_match_id_output, [ "Retrieving test cases with label IDs: 99999...", "Found 0 matching test case(s):", - "No test cases found with the specified label IDs." 
- ] + "No test cases found with the specified label IDs.", + ], ) def test_labels_cases_single_case_workflow(self): """Test single case label operations using update_case endpoint""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) single_case_label_title = f"e2e-single-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( - add_label_output, - [ - f"Adding label '{single_case_label_title}'...", - "Successfully added label:" - ] + add_label_output, [f"Adding label '{single_case_label_title}'...", "Successfully added label:"] ) # Extract label ID for later use import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) @@ -1411,77 +1393,80 @@ def test_labels_cases_single_case_workflow(self): single_case_id = "24964" # Add label to single test case - add_single_case_output = _run_cmd(f""" + add_single_case_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{single_case_id}" \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( add_single_case_output, [ f"Adding label '{single_case_label_title}' to 1 test case(s)...", "Successfully processed 1 case(s):", - f"Successfully added label '{single_case_label_title}' to case {single_case_id}" - ] + f"Successfully added label '{single_case_label_title}' to case {single_case_id}", + ], ) # Verify the label was added by listing cases with this label - list_cases_output = _run_cmd(f""" + list_cases_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( list_cases_output, [ f"Retrieving test cases with label title '{single_case_label_title}'...", "Found 1 matching test case(s):", - f"Case ID: {single_case_id}" - ] + f"Case ID: {single_case_id}", + ], ) finally: # Clean up: delete the test label - _run_cmd(f""" + _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) def test_labels_tests_full_workflow(self): """Test complete workflow of test label operations""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) test_label_title = f"e2e-test-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{test_label_title}" - """) - _assert_contains( - add_label_output, - [ - f"Adding label '{test_label_title}'...", - "Successfully added label:" - ] + """ ) + _assert_contains(add_label_output, [f"Adding label '{test_label_title}'...", "Successfully added label:"]) 
# Extract label ID for cleanup import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) @@ -1491,126 +1476,122 @@ def test_labels_tests_full_workflow(self): test_ids = ["266149", "266151"] # Real test IDs for functional testing # Test 1: Add labels to tests using --test-ids - add_tests_output = _run_cmd(f""" + add_tests_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-ids "{','.join(test_ids)}" \\ --title "{test_label_title}" - """) - - _assert_contains( - add_tests_output, - [ - f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..." - ] + """ ) + _assert_contains(add_tests_output, [f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..."]) + # Test 2: Add labels to tests using CSV file import os + csv_file_path = os.path.join(os.path.dirname(__file__), "sample_csv", "test_ids.csv") - - add_tests_csv_output = _run_cmd(f""" + + add_tests_csv_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-id-file "{csv_file_path}" \\ --title "{test_label_title}" - """) - + """ + ) + _assert_contains( add_tests_csv_output, - [ - "Loaded 2 test ID(s) from file", - f"Adding label '{test_label_title}' to 2 test(s)..." - ] + ["Loaded 2 test ID(s) from file", f"Adding label '{test_label_title}' to 2 test(s)..."], ) # Test 3: Get test labels for specific tests - get_test_labels_output = _run_cmd(f""" + get_test_labels_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests get \\ --test-ids "{','.join(test_ids)}" - """) + """ + ) _assert_contains( - get_test_labels_output, - [ - f"Retrieving labels for {len(test_ids)} test(s)...", - "Test label information:" - ] + get_test_labels_output, [f"Retrieving labels for {len(test_ids)} test(s)...", "Test label information:"] ) finally: # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) def test_labels_tests_validation_errors(self): """Test validation errors for test label commands""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Test title too long (21 characters exceeds 20 character limit) long_title = f"this-is-a-very-long-title-{random_suffix}" # This will be > 20 chars - title_error_output, return_code = _run_cmd_allow_failure(f""" + title_error_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-ids "266149" \\ --title "{long_title}" - """) - assert return_code != 0 - _assert_contains( - title_error_output, - ["exceeds 20 character limit and will be skipped."] + """ ) + assert return_code != 0 + _assert_contains(title_error_output, ["exceeds 20 character limit and will be skipped."]) # Test missing test-ids and file valid_title = f"test-{random_suffix}"[:20] # Ensure valid length - missing_ids_output, return_code = _run_cmd_allow_failure(f""" + 
missing_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --title "{valid_title}" - """) - assert return_code != 0 - _assert_contains( - missing_ids_output, - ["Error: Either --test-ids or --test-id-file must be provided."] + """ ) + assert return_code != 0 + _assert_contains(missing_ids_output, ["Error: Either --test-ids or --test-id-file must be provided."]) # Test invalid label IDs format in list command - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests list \\ --run-id "1" \\ --ids "invalid,ids" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) def test_labels_tests_help_commands(self): """Test help output for test label commands""" - + # Test main tests help tests_help_output = _run_cmd("trcli labels tests --help") _assert_contains( @@ -1620,9 +1601,9 @@ def test_labels_tests_help_commands(self): "Manage labels for tests", "Commands:", "add", - "list", - "get" - ] + "list", + "get", + ], ) # Test tests add help @@ -1634,8 +1615,8 @@ def test_labels_tests_help_commands(self): "Add label(s) to tests", "--test-ids", "--test-id-file", - "--title" - ] + "--title", + ], ) # Test tests list help @@ -1646,35 +1627,27 @@ def test_labels_tests_help_commands(self): "Usage: trcli labels tests list [OPTIONS]", "List tests filtered by label ID from specific runs", "--run-id", - "--ids" - ] + "--ids", + ], ) # Test tests get help tests_get_help_output = _run_cmd("trcli labels tests get --help") _assert_contains( tests_get_help_output, - [ - "Usage: trcli labels tests get [OPTIONS]", - "Get the labels of tests using test IDs", - "--test-id" - ] + ["Usage: trcli labels tests get [OPTIONS]", "Get the labels of tests using test IDs", "--test-id"], ) def test_references_cases_help_commands(self): """Test references cases help commands""" - + # Test main references help references_help_output = _run_cmd("trcli references --help") _assert_contains( references_help_output, - [ - "Usage: trcli references [OPTIONS] COMMAND [ARGS]...", - "Manage references in TestRail", - "cases" - ] + ["Usage: trcli references [OPTIONS] COMMAND [ARGS]...", "Manage references in TestRail", "cases"], ) - + # Test references cases help cases_help_output = _run_cmd("trcli references cases --help") _assert_contains( @@ -1683,23 +1656,18 @@ def test_references_cases_help_commands(self): "Usage: trcli references cases [OPTIONS] COMMAND [ARGS]...", "Manage references for test cases", "add", - "update", - "delete" - ] + "update", + "delete", + ], ) - + # Test references cases add help add_help_output = _run_cmd("trcli references cases add --help") _assert_contains( add_help_output, - [ - "Usage: trcli references cases add [OPTIONS]", - "Add references to test cases", - "--case-ids", - "--refs" - ] + ["Usage: trcli references cases add [OPTIONS]", "Add references to test cases", "--case-ids", "--refs"], ) - + # Test references cases update help update_help_output = _run_cmd("trcli references cases update --help") _assert_contains( @@ -1708,10 +1676,10 @@ def test_references_cases_help_commands(self): "Usage: trcli references 
cases update [OPTIONS]", "Update references on test cases by replacing existing ones", "--case-ids", - "--refs" - ] + "--refs", + ], ) - + # Test references cases delete help delete_help_output = _run_cmd("trcli references cases delete --help") _assert_contains( @@ -1720,66 +1688,66 @@ def test_references_cases_help_commands(self): "Usage: trcli references cases delete [OPTIONS]", "Delete all or specific references from test cases", "--case-ids", - "--refs" - ] + "--refs", + ], ) def test_references_cases_error_scenarios(self): """Test references cases error scenarios""" - + # Test invalid test case IDs format - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "invalid,ids" \\ --refs "REQ-1" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) - + # Test empty references - empty_refs_output, return_code = _run_cmd_allow_failure(f""" + empty_refs_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "321" \\ --refs ",,," - """) - assert return_code != 0 - _assert_contains( - empty_refs_output, - ["Error: No valid references provided."] + """ ) - + assert return_code != 0 + _assert_contains(empty_refs_output, ["Error: No valid references provided."]) + # Test references too long (over 2000 characters) - long_refs = ','.join([f'REQ-{i}' * 100 for i in range(10)]) # Create very long references - long_refs_output, return_code = _run_cmd_allow_failure(f""" + long_refs = ",".join([f"REQ-{i}" * 100 for i in range(10)]) # Create very long references + long_refs_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "321" \\ --refs "{long_refs}" - """) - assert return_code != 0 - _assert_contains( - long_refs_output, - ["exceeds 2000 character limit"] + """ ) + assert return_code != 0 + _assert_contains(long_refs_output, ["exceeds 2000 character limit"]) # ==================== ASSIGN FEATURE TESTS ==================== - + def test_assign_failures_single_user(self): """Test --assign feature with single user""" # Note: This test assumes a valid TestRail user exists in the instance # In a real environment, you would use actual user emails - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1787,7 +1755,8 @@ def test_assign_failures_single_user(self): --title "[CLI-E2E-Tests] Assign Failures - Single User" \\ --assign "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1795,13 +1764,14 @@ def test_assign_failures_single_user(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." 
- ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_multiple_users(self): """Test --assign feature with multiple users (round-robin assignment)""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1809,7 +1779,8 @@ def test_assign_failures_multiple_users(self): --title "[CLI-E2E-Tests] Assign Failures - Multiple Users" \\ --assign "trcli@gurock.io,trcli@testrail.com" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1817,13 +1788,14 @@ def test_assign_failures_multiple_users(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_short_form(self): """Test --assign feature using -a short form""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1831,7 +1803,8 @@ def test_assign_failures_short_form(self): --title "[CLI-E2E-Tests] Assign Failures - Short Form" \\ -a "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1839,35 +1812,38 @@ def test_assign_failures_short_form(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_without_assign_option(self): """Test that normal operation works without --assign option""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] No Assign Option" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ "Auto-assign failures: No", "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) # Should NOT contain assignment message assert "Assigning failed results:" not in output def test_assign_failures_invalid_user(self): """Test --assign feature with invalid user email""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1875,19 +1851,16 @@ def test_assign_failures_invalid_user(self): --title "[CLI-E2E-Tests] Assign Failures - Invalid User" \\ --assign "invalid.user@nonexistent.com" \\ -f "reports_junit/assign_test_failures.xml" - """) - - assert return_code != 0 - _assert_contains( - output, - [ - "Error: User not found: invalid.user@nonexistent.com" - ] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: User not found: invalid.user@nonexistent.com"]) + def test_assign_failures_mixed_valid_invalid_users(self): """Test --assign feature with mix of valid and invalid users""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1895,19 +1868,16 @@ def test_assign_failures_mixed_valid_invalid_users(self): --title "[CLI-E2E-Tests] Assign Failures - Mixed Users" \\ --assign "trcli@gurock.io,invalid.user@nonexistent.com" \\ -f "reports_junit/assign_test_failures.xml" - """) - - assert return_code != 0 - _assert_contains( - output, - [ - "Error: User not found: invalid.user@nonexistent.com" - ] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: User not found: invalid.user@nonexistent.com"]) + def test_assign_failures_whitespace_handling(self): """Test --assign feature with whitespace in email list""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1915,7 +1885,8 @@ def test_assign_failures_whitespace_handling(self): --title "[CLI-E2E-Tests] Assign Failures - Whitespace" \\ --assign " trcli@gurock.io , trcli@testrail.com " \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1923,42 +1894,41 @@ def test_assign_failures_whitespace_handling(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_help_documentation(self): """Test that --assign option appears in help documentation""" help_output = _run_cmd("trcli parse_junit --help") _assert_contains( - help_output, - [ - "-a, --assign", - "Comma-separated list of user emails to assign failed", - "test results to." 
- ] + help_output, ["-a, --assign", "Comma-separated list of user emails to assign", "failed test results to."] ) def test_assign_failures_with_existing_run(self): """Test --assign feature when updating an existing run""" # First create a run - create_output = _run_cmd(f""" + create_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ -f "reports_junit/generic_ids_auto.xml" - """) - + """ + ) + # Extract run ID from output import re - run_id_match = re.search(r'runs/view/(\d+)', create_output) + + run_id_match = re.search(r"runs/view/(\d+)", create_output) assert run_id_match, "Could not extract run ID from output" run_id = run_id_match.group(1) - + # Update the run with failed tests and assignment - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1967,14 +1937,14 @@ def test_assign_failures_with_existing_run(self): --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ --assign "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( update_output, [ "Auto-assign failures: Yes (trcli@gurock.io)", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view/{run_id}", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) - \ No newline at end of file diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 3dcd196..fa1095f 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -7,7 +7,9 @@ from trcli.cli import Environment from trcli.constants import ( ProjectErrors, - FAULT_MAPPING, OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID, + FAULT_MAPPING, + OLD_SYSTEM_NAME_AUTOMATION_ID, + UPDATED_SYSTEM_NAME_AUTOMATION_ID, ) from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData @@ -33,7 +35,7 @@ def __init__( environment.case_fields, environment.run_description, environment.result_fields, - environment.section_id + environment.section_id, ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) @@ -48,11 +50,11 @@ def check_automation_id_field(self, project_id: int) -> Union[str, None]: if not response.error_message: fields: List = response.response_text automation_id_field = next( - filter( + filter( lambda x: x["system_name"] in [OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID], - fields + fields, ), - None + None, ) if automation_id_field: if automation_id_field["is_active"] is False: @@ -79,11 +81,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project """ projects_data, error = self.__get_all_projects() if not error: - available_projects = [ - project - for project in projects_data - if project["name"] == project_name - ] + available_projects = [project for project in projects_data if project["name"] == project_name] if len(available_projects) == 1: return ProjectData( @@ -94,9 +92,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project elif len(available_projects) > 1: if project_id in [project["id"] for project in available_projects]: project_index = [ - index - for index, project in enumerate(available_projects) - 
if project["id"] == project_id + index for index, project in enumerate(available_projects) if project["id"] == project_id ][0] return ProjectData( project_id=int(available_projects[project_index]["id"]), @@ -131,11 +127,7 @@ def check_suite_id(self, project_id: int) -> Tuple[bool, str]: suite_id = self.suites_data_from_provider.suite_id suites_data, error = self.__get_all_suites(project_id) if not error: - available_suites = [ - suite - for suite in suites_data - if suite["id"] == suite_id - ] + available_suites = [suite for suite in suites_data if suite["id"] == suite_id] return ( (True, "") if len(available_suites) > 0 @@ -207,9 +199,7 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_suite/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -224,9 +214,11 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(suite_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(suite_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: @@ -246,20 +238,24 @@ def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: if self.environment.section_id: if section.section_id in sections_by_id.keys(): section_json = sections_by_id[section.section_id] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True if section.name in sections_by_name.keys(): section_json = sections_by_name[section.name] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True self.data_provider.update_data(section_data=section_data) @@ -281,9 +277,7 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_section/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -298,9 +292,11 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(section_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(section_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, 
str]: @@ -327,12 +323,14 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: aut_id = test_case.custom_automation_id if aut_id in test_cases_by_aut_id.keys(): case = test_cases_by_aut_id[aut_id] - test_case_data.append({ - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id - }) + test_case_data.append( + { + "case_id": case["id"], + "section_id": case["section_id"], + "title": case["title"], + OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, + } + ) else: missing_cases_number += 1 self.data_provider.update_data(case_data=test_case_data) @@ -386,25 +384,25 @@ def add_cases(self) -> Tuple[List[dict], str]: { "case_id": response.response_text["id"], "section_id": response.response_text["section_id"], - "title": response.response_text["title"] + "title": response.response_text["title"], } for response in responses ] return returned_resources, error_message def add_run( - self, - project_id: int, - run_name: str, - milestone_id: int = None, - start_date: str = None, - end_date: str = None, - plan_id: int = None, - config_ids: List[int] = None, - assigned_to_id: int = None, - include_all: bool = False, - refs: str = None, - case_ids: List[int] = None, + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, ) -> Tuple[int, str]: """ Creates a new test run. @@ -432,7 +430,7 @@ def add_run( "name": add_run_data["name"], "suite_id": add_run_data["suite_id"], "config_ids": config_ids, - "runs": [add_run_data] + "runs": [add_run_data], } else: entry_data = add_run_data @@ -440,8 +438,16 @@ def add_run( run_id = response.response_text["runs"][0]["id"] return run_id, response.error_message - def update_run(self, run_id: int, run_name: str, start_date: str = None, - end_date: str = None, milestone_id: int = None, refs: str = None, refs_action: str = 'add') -> Tuple[dict, str]: + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: """ Updates an existing run :run_id: run id @@ -453,12 +459,13 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, run_response.error_message - + existing_description = run_response.response_text.get("description", "") existing_refs = run_response.response_text.get("refs", "") - add_run_data = self.data_provider.add_run(run_name, start_date=start_date, - end_date=end_date, milestone_id=milestone_id) + add_run_data = self.data_provider.add_run( + run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id + ) add_run_data["description"] = existing_description # Retain the current description # Handle references based on action @@ -473,7 +480,7 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, report_case_ids = add_run_data["case_ids"] joint_case_ids = list(set(report_case_ids + run_case_ids)) add_run_data["case_ids"] = joint_case_ids - + plan_id = run_response.response_text["plan_id"] config_ids = run_response.response_text["config_ids"] if not plan_id: @@ -505,29 +512,29 @@ def _manage_references(self, existing_refs: str, new_refs: str, action: 
str) -> """ if not existing_refs: existing_refs = "" - - if action == 'update': + + if action == "update": # Replace all references with new ones return new_refs - elif action == 'delete': + elif action == "delete": if not new_refs: # Delete all references return "" else: # Delete specific references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + refs_to_delete = [ref.strip() for ref in new_refs.split(",") if ref.strip()] updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ','.join(updated_list) + return ",".join(updated_list) else: # action == 'add' (default) # Add new references to existing ones if not existing_refs: return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + new_list = [ref.strip() for ref in new_refs.split(",") if ref.strip()] # Avoid duplicates combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ','.join(combined_list) + return ",".join(combined_list) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: """ @@ -540,11 +547,11 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, [], [], run_response.error_message - + existing_refs = run_response.response_text.get("refs", "") or "" - + # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] # Deduplicate input references new_list = [] seen = set() @@ -553,28 +560,33 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic if ref_clean and ref_clean not in seen: new_list.append(ref_clean) seen.add(ref_clean) - + # Determine which references are new vs duplicates added_refs = [ref for ref in new_list if ref not in existing_list] skipped_refs = [ref for ref in new_list if ref in existing_list] - + # If no new references to add, return current state if not added_refs: return run_response.response_text, added_refs, skipped_refs, None - + # Combine references combined_list = existing_list + added_refs - combined_refs = ','.join(combined_list) - + combined_refs = ",".join(combined_list) + if len(combined_refs) > 250: - return None, [], [], f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit" - + return ( + None, + [], + [], + f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit", + ) + update_data = {"refs": combined_refs} - + # Determine the correct API endpoint based on plan membership plan_id = run_response.response_text.get("plan_id") config_ids = run_response.response_text.get("config_ids") - + if not plan_id: # Standalone run update_response = self.client.send_post(f"update_run/{run_id}", update_data) @@ -586,7 +598,7 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic plan_response = self.client.send_get(f"get_plan/{plan_id}") if 
plan_response.error_message: return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - + # Find the entry_id for this run entry_id = None for entry in plan_response.response_text.get("entries", []): @@ -596,19 +608,21 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic break if entry_id: break - + if not entry_id: return None, [], [], f"Could not find plan entry for run {run_id}" - + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - + if update_response.error_message: return None, [], [], update_response.error_message - + updated_run_response = self.client.send_get(f"get_run/{run_id}") return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message - def update_existing_case_references(self, case_id: int, junit_refs: str, strategy: str = "append") -> Tuple[bool, str, List[str], List[str]]: + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: """ Update existing case references with values from JUnit properties. :param case_id: ID of the test case @@ -618,62 +632,69 @@ def update_existing_case_references(self, case_id: int, junit_refs: str, strateg """ if not junit_refs or not junit_refs.strip(): return True, None, [], [] # No references to process - + # Parse and validate JUnit references, deduplicating input junit_ref_list = [] seen = set() - for ref in junit_refs.split(','): + for ref in junit_refs.split(","): ref_clean = ref.strip() if ref_clean and ref_clean not in seen: junit_ref_list.append(ref_clean) seen.add(ref_clean) - + if not junit_ref_list: return False, "No valid references found in JUnit property", [], [] - + # Get current case data case_response = self.client.send_get(f"get_case/{case_id}") if case_response.error_message: return False, case_response.error_message, [], [] - - existing_refs = case_response.response_text.get('refs', '') or '' - + + existing_refs = case_response.response_text.get("refs", "") or "" + if strategy == "replace": # Replace strategy: use JUnit refs as-is - new_refs = ','.join(junit_ref_list) + new_refs = ",".join(junit_ref_list) added_refs = junit_ref_list skipped_refs = [] else: # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] - + existing_ref_list = ( + [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] + ) + # Determine which references are new vs duplicates added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - + # If no new references to add, return current state if not added_refs: return True, None, added_refs, skipped_refs - + # Combine references combined_list = existing_ref_list + added_refs - new_refs = ','.join(combined_list) - + new_refs = ",".join(combined_list) + # Validate 2000 character limit for test case references if len(new_refs) > 2000: - return False, f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", [], [] - + return ( + False, + f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", + [], + [], + ) + # Update the case update_data = {"refs": new_refs} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if 
update_response.error_message: return False, update_response.error_message, [], [] - + return True, None, added_refs, skipped_refs def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """ Getting test result id and upload attachments for it. """ + """Getting test result id and upload attachments for it.""" tests_in_run, error = self.__get_all_tests_in_run(run_id) if not error: for report_result in report_results: @@ -698,26 +719,18 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: responses = [] error_message = "" # Get pre-validated user IDs if available - user_ids = getattr(self.environment, '_validated_user_ids', []) - - add_results_data_chunks = self.data_provider.add_results_for_cases( - self.environment.batch_size, user_ids - ) + user_ids = getattr(self.environment, "_validated_user_ids", []) + + add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) # Get assigned count from data provider - assigned_count = getattr(self.data_provider, '_assigned_count', 0) - - results_amount = sum( - [len(results["results"]) for results in add_results_data_chunks] - ) + assigned_count = getattr(self.data_provider, "_assigned_count", 0) - with self.environment.get_progress_bar( - results_amount=results_amount, prefix="Adding results" - ) as progress_bar: + results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) + + with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: futures = { - executor.submit( - self.client.send_post, f"add_results_for_cases/{run_id}", body - ): body + executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body for body in add_results_data_chunks } responses, error_message = self.handle_futures( @@ -730,11 +743,7 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: # Iterate through futures to get all responses from done tasks (not cancelled) responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) responses = [response.response_text for response in responses] - results = [ - result - for results_list in responses - for result in results_list - ] + results = [result for results_list in responses for result in results_list] report_results_w_attachments = [] for results_data_chunk in add_results_data_chunks: for test_result in results_data_chunk["results"]: @@ -744,22 +753,22 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: attachments_count = 0 for result in report_results_w_attachments: attachments_count += len(result["attachments"]) - self.environment.log(f"Uploading {attachments_count} attachments " - f"for {len(report_results_w_attachments)} test results.") + self.environment.log( + f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." 
+ ) self.upload_attachments(report_results_w_attachments, results, run_id) else: self.environment.log(f"No attachments found to upload.") - + # Log assignment results if assignment was performed if user_ids: - total_failed = getattr(self.data_provider, '_total_failed_count', assigned_count) + total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) if assigned_count > 0: self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") else: self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + return responses, error_message, progress_bar.n def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -776,9 +785,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st if action_string == "add_case": arguments = arguments.to_dict() arguments.pop("case_id") - if not self.response_verifier.verify_returned_data( - arguments, response.response_text - ): + if not self.response_verifier.verify_returned_data(arguments, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] self.__cancel_running_futures(futures, action_string) @@ -786,9 +793,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st progress_bar.update(1) else: error_message = response.error_message - self.environment.log( - f"\nError during {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nError during {action_string}. Trying to cancel scheduled tasks.") self.__cancel_running_futures(futures, action_string) break else: @@ -826,9 +831,7 @@ def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: responses = [] error_message = "" for section in added_sections: - response = self.client.send_post( - f"delete_section/{section['section_id']}", payload={} - ) + response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) if not response.error_message: responses.append(response.response_text) else: @@ -868,45 +871,52 @@ def retrieve_results_after_cancelling(futures) -> list: def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ Validates a user email and returns the user ID if valid. 
- + :param email: User email to validate :returns: Tuple with user ID (or None if not found) and error message """ if not email or not email.strip(): return None, "Email cannot be empty" - + email = email.strip() # Use proper URL encoding for the query parameter import urllib.parse + encoded_email = urllib.parse.quote_plus(email) response = self.client.send_get(f"get_user_by_email&email={encoded_email}") - + if response.error_message: # Map TestRail's email validation error to our expected format if "Field :email is not a valid email address" in response.error_message: return None, f"User not found: {email}" return None, response.error_message - + if response.status_code == 200: try: user_data = response.response_text - if isinstance(user_data, dict) and 'id' in user_data: - return user_data['id'], "" + if isinstance(user_data, dict) and "id" in user_data: + return user_data["id"], "" else: return None, f"Invalid response format for user: {email}" except (KeyError, TypeError): return None, f"Invalid response format for user: {email}" elif response.status_code == 400: # Check if the response contains the email validation error - if (hasattr(response, 'response_text') and response.response_text and - isinstance(response.response_text, dict) and - "Field :email is not a valid email address" in str(response.response_text.get('error', ''))): + if ( + hasattr(response, "response_text") + and response.response_text + and isinstance(response.response_text, dict) + and "Field :email is not a valid email address" in str(response.response_text.get("error", "")) + ): return None, f"User not found: {email}" return None, f"User not found: {email}" else: # For other status codes, check if it's the email validation error - if (hasattr(response, 'response_text') and response.response_text and - "Field :email is not a valid email address" in str(response.response_text)): + if ( + hasattr(response, "response_text") + and response.response_text + and "Field :email is not a valid email address" in str(response.response_text) + ): return None, f"User not found: {email}" return None, f"API error (status {response.status_code}) when validating user: {email}" @@ -925,9 +935,7 @@ def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: return response def __cancel_running_futures(self, futures, action_string): - self.environment.log( - f"\nAborting: {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") for future in futures: future.cancel() @@ -936,33 +944,33 @@ def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], s Get all cases from all pages """ if suite_id is None: - return self.__get_all_entities('cases', f"get_cases/{project_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}") else: - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}") def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ Get all sections from all pages """ - return self.__get_all_entities('sections', f"get_sections/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}") def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ Get all tests from all pages """ - return self.__get_all_entities('tests', f"get_tests/{run_id}") + return self.__get_all_entities("tests", f"get_tests/{run_id}") def __get_all_projects(self) -> Tuple[List[dict], str]: """ Get all projects from all pages """ - return self.__get_all_entities('projects', f"get_projects") + return self.__get_all_entities("projects", f"get_projects") def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ Get all suites from all pages """ - return self.__get_all_entities('suites', f"get_suites/{project_id}") + return self.__get_all_entities("suites", f"get_suites/{project_id}") def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ @@ -979,9 +987,7 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ return response.response_text, response.error_message # Check if response is a string (JSON parse failed) if isinstance(response.response_text, str): - error_msg = FAULT_MAPPING["invalid_api_response"].format( - error_details=response.response_text[:200] - ) + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) return [], error_msg # Endpoints with pagination entities = entities + response.response_text[entity] @@ -1001,9 +1007,8 @@ def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: :param title: Title of the label (max 20 characters) :returns: Tuple with created label data and error string """ - # Use multipart/form-data like the working CURL command - files = {'title': (None, title)} - response = self.client.send_post(f"add_label/{project_id}", payload=None, files=files) + payload = {"title": title} + response = self.client.send_post(f"add_label/{project_id}", payload=payload) return response.response_text, response.error_message def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: @@ -1014,12 +1019,8 @@ def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict :param title: New title for the label (max 20 characters) :returns: Tuple with updated label data and error string """ - # Use multipart/form-data like add_label - files = { - 'project_id': (None, str(project_id)), - 'title': (None, title) # Field name is 'title' (no colon) for form data - } - response = self.client.send_post(f"update_label/{label_id}", payload=None, files=files) + payload = {"project_id": project_id, "title": title} + response = self.client.send_post(f"update_label/{label_id}", payload=payload) return response.response_text, response.error_message 
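
Note on the hunk above: the label endpoints stop building multipart form-data bodies (previously `files={'title': (None, title)}` with `payload=None`) and instead pass plain dictionaries through `send_post`'s `payload` argument; `delete_labels` just below follows the same pattern. A minimal usage sketch of the new payload shapes — `relabel_project` is a hypothetical helper and the ids are placeholders, assuming an already-authenticated trcli API client is passed in as `client`:

```python
def relabel_project(client, project_id: int, stale_label_id: int) -> None:
    # Create a label: the body is now a plain dict, not multipart form-data.
    client.send_post(f"add_label/{project_id}", payload={"title": "smoke"})
    # Rename an existing label: project_id travels in the JSON body as well.
    client.send_post(
        f"update_label/{stale_label_id}",
        payload={"project_id": project_id, "title": "regression"},
    )
    # Bulk delete: label_ids is sent as a real list instead of a JSON string in a form field.
    client.send_post("delete_labels", payload={"label_ids": [stale_label_id]})
```
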
def get_label(self, label_id: int) -> Tuple[dict, str]: @@ -1044,11 +1045,11 @@ def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tupl params.append(f"offset={offset}") if limit != 250: params.append(f"limit={limit}") - + url = f"get_labels/{project_id}" if params: url += "&" + "&".join(params) - + response = self.client.send_get(url) return response.response_text, response.error_message @@ -1068,18 +1069,17 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: :param label_ids: List of label IDs to delete :returns: Tuple with success status and error string """ - # Send as form data with JSON array format - import json - label_ids_json = json.dumps(label_ids) - files = {"label_ids": (None, label_ids_json)} - response = self.client.send_post("delete_labels", payload=None, files=files) + payload = {"label_ids": label_ids} + response = self.client.send_post("delete_labels", payload=payload) success = response.status_code == 200 return success, response.error_message - def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: + def add_labels_to_cases( + self, case_ids: List[int], title: str, project_id: int, suite_id: int = None + ) -> Tuple[dict, str]: """ Add a label to multiple test cases - + :param case_ids: List of test case IDs :param title: Label title (max 20 characters) :param project_id: Project ID for validation @@ -1087,122 +1087,113 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_cases': [], - 'failed_cases': [], - 'max_labels_reached': [], - 'case_not_found': [] - } - + results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} + # Check if project is multi-suite by getting all cases without suite_id all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) if error_message: return results, error_message - + # Check if project has multiple suites suite_ids = set() for case in all_cases_no_suite: - if 'suite_id' in case and case['suite_id']: - suite_ids.add(case['suite_id']) - + if "suite_id" in case and case["suite_id"]: + suite_ids.add(case["suite_id"]) + # If project has multiple suites and no suite_id provided, require it if len(suite_ids) > 1 and suite_id is None: return results, "This project is multisuite, suite id is required" - + # Get all cases to validate that the provided case IDs exist all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return results, error_message - + # Create a set of existing case IDs for quick lookup - existing_case_ids = {case['id'] for case in all_cases} - + existing_case_ids = {case["id"] for case in all_cases} + # Validate case IDs and separate valid from invalid ones invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - + # Record invalid case IDs for case_id in invalid_case_ids: - results['case_not_found'].append(case_id) - + results["case_not_found"].append(case_id) + # If no valid case IDs, return early if not valid_case_ids: return results, "" - + # Check if label exists or create it existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Find existing label with the same title label_id = None - for label in 
existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + # Collect case data and validate constraints cases_to_update = [] for case_id in valid_case_ids: # Get current case to check existing labels case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: - results['failed_cases'].append({ - 'case_id': case_id, - 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" - }) + results["failed_cases"].append( + {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} + ) continue - + case_data = case_response.response_text - current_labels = case_data.get('labels', []) - + current_labels = case_data.get("labels", []) + # Check if label already exists on this case - if any(label.get('id') == label_id for label in current_labels): - results['successful_cases'].append({ - 'case_id': case_id, - 'message': f"Label '{title}' already exists on case {case_id}" - }) + if any(label.get("id") == label_id for label in current_labels): + results["successful_cases"].append( + {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} + ) continue - + # Check maximum labels limit (10) if len(current_labels) >= 10: - results['max_labels_reached'].append(case_id) + results["max_labels_reached"].append(case_id) continue - + # Prepare case for update - existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({ - 'case_id': case_id, - 'labels': updated_label_ids - }) - + cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) + # Update cases using appropriate endpoint if len(cases_to_update) == 1: # Single case: use update_case/{case_id} case_info = cases_to_update[0] - case_update_data = {'labels': case_info['labels']} - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) elif len(cases_to_update) > 1: # Multiple cases: use update_cases/{suite_id} # Need to determine suite_id from the cases @@ -1210,62 +1201,72 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, if not case_suite_id: # Get suite_id from the first case if not provided first_case = 
all_cases[0] if all_cases else None - case_suite_id = first_case.get('suite_id') if first_case else None - + case_suite_id = first_case.get("suite_id") if first_case else None + if not case_suite_id: # Fall back to individual updates if no suite_id available for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) else: # Batch update using update_cases/{suite_id} batch_update_data = { - 'case_ids': [case_info['case_id'] for case_info in cases_to_update], - 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases + "case_ids": [case_info["case_id"] for case_info in cases_to_update], + "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases } - + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - + if batch_response.status_code == 200: for case_info in cases_to_update: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: # If batch update fails, fall back to individual updates for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) - + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + return results, "" - def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + def get_cases_by_label( + self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None + ) -> Tuple[List[dict], str]: """ Get test cases filtered by label ID or title - + :param project_id: Project ID 
:param suite_id: Suite ID (optional) :param label_ids: List of label IDs to filter by @@ -1276,234 +1277,228 @@ def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: L all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return [], error_message - + # If filtering by title, first get the label ID target_label_ids = label_ids or [] if label_title and not target_label_ids: labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Filter cases that have any of the target labels matching_cases = [] for case in all_cases: - case_labels = case.get('labels', []) - case_label_ids = [label.get('id') for label in case_labels] - + case_labels = case.get("labels", []) + case_label_ids = [label.get("id") for label in case_labels] + # Check if any of the target label IDs are present in this case if any(label_id in case_label_ids for label_id in target_label_ids): matching_cases.append(case) - + return matching_cases, "" - def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: """ Add labels to multiple tests - + :param test_ids: List of test IDs :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) :param project_id: Project ID for validation :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_tests': [], - 'failed_tests': [], - 'max_labels_reached': [], - 'test_not_found': [] - } - + results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + # Normalize titles to a list if isinstance(titles, str): title_list = [titles] else: title_list = titles - + # At this point, title_list should already be validated by the CLI # Just ensure we have clean titles title_list = [title.strip() for title in title_list if title.strip()] - + if not title_list: return {}, "No valid labels provided" - + # Validate test IDs by getting run information for each test valid_test_ids = [] for test_id in test_ids: # Get test information to validate it exists test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) continue - + test_data = test_response.response_text # Validate that the test belongs to the correct project - run_id = test_data.get('run_id') + run_id = test_data.get("run_id") if run_id: run_response = self.client.send_get(f"get_run/{run_id}") if run_response.status_code == 200: run_data = run_response.response_text - if run_data.get('project_id') == project_id: + if run_data.get("project_id") == project_id: valid_test_ids.append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) - + 
results["test_not_found"].append(test_id) + # If no valid test IDs, return early if not valid_test_ids: return results, "" - + # Check if labels exist or create them existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Process each title to get/create label IDs label_ids = [] label_id_to_title = {} # Map label IDs to their titles for title in title_list: # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + if label_id: label_ids.append(label_id) label_id_to_title[label_id] = title - + # Collect test data and validate constraints tests_to_update = [] for test_id in valid_test_ids: # Get current test to check existing labels test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['failed_tests'].append({ - 'test_id': test_id, - 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" - }) + results["failed_tests"].append( + {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} + ) continue - + test_data = test_response.response_text - current_labels = test_data.get('labels', []) - current_label_ids = [label.get('id') for label in current_labels if label.get('id')] - + current_labels = test_data.get("labels", []) + current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + new_label_ids = [] already_exists_titles = [] - + for label_id in label_ids: if label_id not in current_label_ids: new_label_ids.append(label_id) else: if label_id in label_id_to_title: already_exists_titles.append(label_id_to_title[label_id]) - + if not new_label_ids: - results['successful_tests'].append({ - 'test_id': test_id, - 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" - }) + results["successful_tests"].append( + { + "test_id": test_id, + "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", + } + ) continue - + # Check maximum labels limit (10) if len(current_label_ids) + len(new_label_ids) > 10: - results['max_labels_reached'].append(test_id) + results["max_labels_reached"].append(test_id) continue - + # Prepare test for update updated_label_ids = current_label_ids + new_label_ids - + new_label_titles = [] for label_id in new_label_ids: if label_id in label_id_to_title: new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append({ - 'test_id': test_id, - 'labels': updated_label_ids, - 'new_labels': new_label_ids, - 'new_label_titles': new_label_titles - }) - + + tests_to_update.append( + { + "test_id": test_id, + "labels": updated_label_ids, + "new_labels": new_label_ids, + "new_label_titles": new_label_titles, + } + ) + # Update tests using appropriate endpoint if len(tests_to_update) == 1: # Single test: use update_test/{test_id} test_info = tests_to_update[0] - test_update_data = {'labels': test_info['labels']} - + test_update_data = 
{"labels": test_info["labels"]} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) else: # Multiple tests: use individual updates to ensure each test gets its specific labels for test_info in tests_to_update: - test_update_data = {'labels': test_info['labels']} + test_update_data = {"labels": test_info["labels"]} update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) - + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + return results, "" - def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: """ Get tests filtered by label ID or title from specific runs - + :param project_id: Project ID :param label_ids: List of label IDs to filter by :param label_title: Label title to filter by @@ -1516,14 +1511,14 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Get runs for the project (either all 
runs or specific run IDs) if run_ids: # Use specific run IDs - validate they exist by getting run details @@ -1539,67 +1534,65 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label runs_response = self.client.send_get(f"get_runs/{project_id}") if runs_response.status_code != 200: return [], runs_response.error_message - + runs_data = runs_response.response_text - runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data - + runs = runs_data.get("runs", []) if isinstance(runs_data, dict) else runs_data + # Collect all tests from all runs matching_tests = [] for run in runs: - run_id = run.get('id') + run_id = run.get("id") if not run_id: continue - + # Get tests for this run tests_response = self.client.send_get(f"get_tests/{run_id}") if tests_response.status_code != 200: continue # Skip this run if we can't get tests - + tests_data = tests_response.response_text - tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data - + tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + # Filter tests that have any of the target labels for test in tests: - test_labels = test.get('labels', []) - test_label_ids = [label.get('id') for label in test_labels] - + test_labels = test.get("labels", []) + test_label_ids = [label.get("id") for label in test_labels] + # Check if any of the target label IDs are present in this test if any(label_id in test_label_ids for label_id in target_label_ids): matching_tests.append(test) - + return matching_tests, "" def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: """ Get labels for specific tests - + :param test_ids: List of test IDs to get labels for :returns: Tuple with list of test label information and error string """ results = [] - + for test_id in test_ids: # Get test information test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results.append({ - 'test_id': test_id, - 'error': f"Test {test_id} not found or inaccessible", - 'labels': [] - }) + results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) continue - + test_data = test_response.response_text - test_labels = test_data.get('labels', []) - - results.append({ - 'test_id': test_id, - 'title': test_data.get('title', 'Unknown'), - 'status_id': test_data.get('status_id'), - 'labels': test_labels, - 'error': None - }) - + test_labels = test_data.get("labels", []) + + results.append( + { + "test_id": test_id, + "title": test_data.get("title", "Unknown"), + "status_id": test_data.get("status_id"), + "labels": test_labels, + "error": None, + } + ) + return results, "" # Test case reference management methods @@ -1614,15 +1607,15 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + # Parse existing references existing_ref_list = [] if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references while preserving order deduplicated_input = [] seen = set() @@ -1631,24 +1624,24 
@@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool if ref_clean and ref_clean not in seen: deduplicated_input.append(ref_clean) seen.add(ref_clean) - + # Add new references (avoid duplicates with existing) all_refs = existing_ref_list.copy() for ref in deduplicated_input: if ref not in all_refs: all_refs.append(ref) - + # Join all references - new_refs_string = ','.join(all_refs) - + new_refs_string = ",".join(all_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1669,18 +1662,18 @@ def update_case_references(self, case_id: int, references: List[str]) -> Tuple[b if ref_clean and ref_clean not in seen: deduplicated_refs.append(ref_clean) seen.add(ref_clean) - + # Join references - new_refs_string = ','.join(deduplicated_refs) - + new_refs_string = ",".join(deduplicated_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1695,36 +1688,36 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = """ if specific_references is None: # Delete all references by setting refs to empty string - update_data = {'refs': ''} + update_data = {"refs": ""} else: # First get the current test case to retrieve existing references case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + if not existing_refs: # No references to delete return True, "" - + # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references for efficient processing refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) - + # Remove specific references remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] - + # Join remaining references - new_refs_string = ','.join(remaining_refs) - update_data = {'refs': new_refs_string} - + new_refs_string = ",".join(remaining_refs) + update_data = {"refs": new_refs_string} + # Update the test case update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: From f4303d5b9386efb1642e490259b0223b545c08ce Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 10 Nov 2025 14:39:46 +0800 Subject: [PATCH 03/10] TRCLI-203 Added 503 and 504 error codes on retry list with exponential backoff for large projects --- tests/test_project_based_client.py | 175 
+++++++++++------------------ trcli/api/api_client.py | 8 +- trcli/settings.py | 4 +- 3 files changed, 76 insertions(+), 111 deletions(-) diff --git a/tests/test_project_based_client.py b/tests/test_project_based_client.py index a0efc00..016495a 100644 --- a/tests/test_project_based_client.py +++ b/tests/test_project_based_client.py @@ -26,26 +26,21 @@ def project_based_client_data_provider(self, mocker): environment.file = "results.xml" environment.case_matcher = MatchersParser.AUTO - api_request_handler = mocker.patch( - "trcli.api.project_based_client.ApiRequestHandler" - ) + api_request_handler = mocker.patch("trcli.api.project_based_client.ApiRequestHandler") api_request_handler.get_project_data.return_value = ProjectData( project_id=environment.project_id, suite_mode=1, error_message="" ) api_request_handler.check_automation_id_field.return_value = None project_based_client = ProjectBasedClient( - environment=environment, suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_based_client.api_request_handler = api_request_handler yield environment, api_request_handler, project_based_client @pytest.mark.project_based_client - @pytest.mark.parametrize( - "timeout", [40, None], ids=["with_timeout", "without_timeout"] - ) - def test_instantiate_api_client( - self, timeout, project_based_client_data_provider, mocker - ): + @pytest.mark.parametrize("timeout", [40, None], ids=["with_timeout", "without_timeout"]) + def test_instantiate_api_client(self, timeout, project_based_client_data_provider, mocker): """The purpose of this test is to check that APIClient was instantiated properly and credential fields were set es expected.""" (_, api_request_handler, _) = project_based_client_data_provider @@ -57,24 +52,22 @@ def test_instantiate_api_client( environment.key = "test_api_key" if timeout: environment.timeout = timeout - timeout_expected_result = 30 if not timeout else timeout - project_based_client = ProjectBasedClient( - environment=environment, suite=junit_file_parser - ) + timeout_expected_result = 60 if not timeout else timeout + project_based_client = ProjectBasedClient(environment=environment, suite=junit_file_parser) api_client = project_based_client.instantiate_api_client() assert ( - api_client.username == environment.username + api_client.username == environment.username ), f"Expected username to be set to: {environment.username}, but got: {api_client.username} instead." assert ( - api_client.password == environment.password + api_client.password == environment.password ), f"Expected password to be set to: {environment.password}, but got: {api_client.password} instead." assert ( - api_client.api_key == environment.key + api_client.api_key == environment.key ), f"Expected api_key to be set to: {environment.key}, but got: {api_client.api_key} instead." assert ( - api_client.timeout == timeout_expected_result + api_client.timeout == timeout_expected_result ), f"Expected timeout to be set to: {timeout_expected_result}, but got: {api_client.timeout} instead." 
def test_resolve_project(self, project_based_client_data_provider): @@ -87,10 +80,10 @@ def test_resolve_project(self, project_based_client_data_provider): ) = project_based_client_data_provider project_based_client.resolve_project() - assert ( - project_based_client.project.project_id == environment.project_id - ), (f"Expected project_based_client.project to have {environment.project_id}," - f" but had {project_based_client.project.project_id}") + assert project_based_client.project.project_id == environment.project_id, ( + f"Expected project_based_client.project to have {environment.project_id}," + f" but had {project_based_client.project.project_id}" + ) @pytest.mark.project_based_client def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider): @@ -109,14 +102,10 @@ def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider) suite_mode=SuiteModes.single_suite ) + assert result_suite_id == suite_id, f"Expected suite_id: {suite_id} but got {result_suite_id} instead." + assert suite_added is False, f"Expected suite_added: {False} but got {suite_added} instead." assert ( - result_suite_id == suite_id - ), f"Expected suite_id: {suite_id} but got {result_suite_id} instead." - assert ( - suite_added is False - ), f"Expected suite_added: {False} but got {suite_added} instead." - assert ( - result_return_code == result_code + result_return_code == result_code ), f"Expected suite_id: {result_code} but got {result_return_code} instead." @pytest.mark.project_based_client @@ -126,14 +115,14 @@ def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider) ids=TEST_GET_SUITE_ID_PROMPTS_USER_IDS, ) def test_get_suite_id_multiple_suites_mode( - self, - user_response, - expected_suite_id, - expected_result_code, - expected_message, - suite_add_error, - project_based_client_data_provider, - mocker, + self, + user_response, + expected_suite_id, + expected_result_code, + expected_message, + suite_add_error, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check that user will be prompted to add suite is one is missing in TestRail. Depending on user response either information about addition of missing suite or error message @@ -160,9 +149,7 @@ def test_get_suite_id_multiple_suites_mode( else: project_based_client.api_request_handler.add_suites.return_value = ( [{"suite_id": expected_suite_id, "name": suite_name}], - FAULT_MAPPING["error_while_adding_suite"].format( - error_message="Failed to add suite." - ), + FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite."), ) project_based_client.api_request_handler.suites_data_from_provider.suite_id = None project_based_client.api_request_handler.suites_data_from_provider.name = suite_name @@ -177,18 +164,14 @@ def test_get_suite_id_multiple_suites_mode( if suite_add_error: expected_elog_calls.append( - mocker.call( - FAULT_MAPPING["error_while_adding_suite"].format( - error_message="Failed to add suite." - ) - ) + mocker.call(FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite.")) ) assert ( - expected_suite_id == result_suite_id + expected_suite_id == result_suite_id ), f"Expected suite_id: {expected_suite_id} but got {result_suite_id} instead." assert ( - expected_result_code == result_code + expected_result_code == result_code ), f"Expected suite_id: {expected_result_code} but got {result_code} instead." 
environment.get_prompt_response_for_auto_creation.assert_called_with( PROMPT_MESSAGES["create_new_suite"].format( @@ -197,9 +180,7 @@ def test_get_suite_id_multiple_suites_mode( ) ) if user_response: - project_based_client.api_request_handler.add_suites.assert_called_with( - project_id=project_id - ) + project_based_client.api_request_handler.add_suites.assert_called_with(project_id=project_id) environment.log.assert_has_calls(expected_log_calls) environment.elog.assert_has_calls(expected_elog_calls) @@ -210,13 +191,13 @@ def test_get_suite_id_multiple_suites_mode( ids=["get_suite_ids succeeds", "get_suite_ids fails"], ) def test_get_suite_id_single_suite_mode( - self, - suite_ids, - error_message, - expected_suite_id, - expected_result_code, - project_based_client_data_provider, - mocker, + self, + suite_ids, + error_message, + expected_suite_id, + expected_result_code, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check flow of get_suite_id_log_error function for single suite mode.""" @@ -238,10 +219,10 @@ def test_get_suite_id_single_suite_mode( result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." if error_message: environment.elog.assert_has_calls(expected_elog_calls) @@ -253,13 +234,13 @@ def test_get_suite_id_single_suite_mode( ids=TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_IDS, ) def test_get_suite_id_single_suite_mode_baselines( - self, - get_suite_ids_result, - expected_suite_id, - expected_result_code, - expected_error_message, - project_based_client_data_provider, - mocker, + self, + get_suite_ids_result, + expected_suite_id, + expected_result_code, + expected_error_message, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check flow of get_suite_id_log_error function for single suite with baselines mode.""" @@ -271,26 +252,22 @@ def test_get_suite_id_single_suite_mode_baselines( suite_mode = SuiteModes.single_suite_baselines project_based_client.api_request_handler.resolve_suite_id_using_name.return_value = (-1, "Any Error") project_based_client.api_request_handler.suites_data_from_provider.suite_id = None - project_based_client.api_request_handler.get_suite_ids.return_value = ( - get_suite_ids_result - ) + project_based_client.api_request_handler.get_suite_ids.return_value = get_suite_ids_result expected_elog_calls = [] if expected_error_message: expected_elog_calls = [mocker.call(expected_error_message)] result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." 
environment.elog.assert_has_calls(expected_elog_calls) @pytest.mark.project_based_client - def test_get_suite_id_unknown_suite_mode( - self, project_based_client_data_provider, mocker - ): + def test_get_suite_id_unknown_suite_mode(self, project_based_client_data_provider, mocker): """The purpose of this test is to check that get_suite_id will return -1 and print proper message when unknown suite mode will be returned during execution.""" ( @@ -302,18 +279,14 @@ def test_get_suite_id_unknown_suite_mode( expected_result_code = -1 expected_suite_id = -1 project_based_client.api_request_handler.suites_data_from_provider.suite_id = None - expected_elog_calls = [ - mocker.call( - FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode) - ) - ] + expected_elog_calls = [mocker.call(FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode))] result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." environment.elog.assert_has_calls(expected_elog_calls) @@ -333,13 +306,11 @@ def test_check_suite_id_returns_id(self, project_based_client_data_provider): result_code = project_based_client.check_suite_id(project_id=project_id) assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected to get {result_code} as result code, but got {expected_result_code} instead." @pytest.mark.project_based_client - def test_check_suite_id_prints_error_message( - self, project_based_client_data_provider, mocker - ): + def test_check_suite_id_prints_error_message(self, project_based_client_data_provider, mocker): """The purpose of this test is to check that proper message would be printed to the user and program will quit when suite ID is not present in TestRail.""" ( @@ -356,13 +327,11 @@ def test_check_suite_id_prints_error_message( ) result_code = project_based_client.check_suite_id(project_id=project_id) - expected_elog_calls = [ - mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) - ] + expected_elog_calls = [mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))] environment.elog.assert_has_calls(expected_elog_calls) assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected to get {expected_result_code} as result code, but got {result_code} instead." def test_resolve_suite_returns_valid_id(self, project_based_client_data_provider): @@ -377,9 +346,7 @@ def test_resolve_suite_returns_valid_id(self, project_based_client_data_provider project_based_client.resolve_project() suite_id, suite_added = project_based_client.resolve_suite() - assert ( - suite_id == 1 - ), f"Expected suite id 1 but got {suite_id} instead." + assert suite_id == 1, f"Expected suite id 1 but got {suite_id} instead." 
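The tests around this point all drive the same high-level flow: resolve the project, resolve (or create) the suite, then create or update the run. A rough, self-contained sketch of that call order and of the return shapes the assertions rely on; the stub class below is purely illustrative and not the real ProjectBasedClient:

```python
# Illustrative stub showing the call order and return shapes these tests assert
# on; the real ProjectBasedClient delegates the actual API calls to ApiRequestHandler.
class ProjectBasedClientStub:
    def resolve_project(self):
        self.project_id = 10          # would normally come from get_project_data()

    def resolve_suite(self):
        return 1, False               # (suite_id, suite_added)

    def create_or_update_test_run(self):
        return 1, ""                  # (run_id, error_message)

client = ProjectBasedClientStub()
client.resolve_project()
suite_id, suite_added = client.resolve_suite()
run_id, error_message = client.create_or_update_test_run()
assert (suite_id, run_id, error_message) == (1, 1, "")
```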
def test_create_or_update_test_run_calls_add_run(self, project_based_client_data_provider): """The purpose of this test is to check that calling the method without a run_id in the environment causes @@ -396,12 +363,8 @@ def test_create_or_update_test_run_calls_add_run(self, project_based_client_data run_id, error_message = project_based_client.create_or_update_test_run() project_based_client.api_request_handler.add_run.assert_called_once() - assert ( - run_id == 1 - ), f"Expected run_id to be 1 but got {run_id} instead." - assert ( - error_message == "" - ), f"Expected error message to be None but got {error_message} instead." + assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead." + assert error_message == "", f"Expected error message to be None but got {error_message} instead." def test_create_or_update_test_run_calls_update_run(self, project_based_client_data_provider): """The purpose of this test is to check that calling the method with a run_id in the environment causes @@ -418,12 +381,8 @@ def test_create_or_update_test_run_calls_update_run(self, project_based_client_d run_id, error_message = project_based_client.create_or_update_test_run() api_request_handler.update_run.assert_called_once() - assert ( - run_id == 1 - ), f"Expected run_id to be 1 but got {run_id} instead." - assert ( - error_message == "" - ), f"Expected error message to be None but got {error_message} instead." + assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead." + assert error_message == "", f"Expected error message to be None but got {error_message} instead." def test_get_project_id(self, project_based_client_data_provider): """The purpose of this test is to check that the _get_project_id() will fall back to the environment.project_id @@ -434,7 +393,7 @@ def test_get_project_id(self, project_based_client_data_provider): project_based_client, ) = project_based_client_data_provider - assert ( - project_based_client._get_project_id() == environment.project_id - ), (f"Expected to get {environment.project_id} from project_based_client.get_project_id but got" - f" {project_based_client._get_project_id()} instead.") + assert project_based_client._get_project_id() == environment.project_id, ( + f"Expected to get {environment.project_id} from project_based_client.get_project_id but got" + f" {project_based_client._get_project_id()} instead." + ) diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index a412754..1742b38 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -39,7 +39,7 @@ class APIClient: PREFIX = "index.php?" VERSION = "/api/v2/" SUFFIX_API_V2_VERSION = f"{PREFIX}{VERSION}" - RETRY_ON = [429, 500, 502] + RETRY_ON = [429, 500, 502, 503, 504] # Added 503 Service Unavailable and 504 Gateway Timeout USER_AGENT = "TRCLI" def __init__( @@ -176,6 +176,12 @@ def __send_request( if status_code == 429: retry_time = float(response.headers["Retry-After"]) sleep(retry_time) + elif status_code in [500, 502, 503, 504] and i < self.retries: + backoff_time = min(2**i, 30) # Exponential backoff capped at 30 seconds + self.logging_function( + f"Server error {status_code}, retrying in {backoff_time}s (attempt {i+1}/{self.retries})..." 
+ ) + sleep(backoff_time) try: # workaround for buggy legacy TR server version response if response.content.startswith(b"USER AUTHENTICATION SUCCESSFUL!\n"): diff --git a/trcli/settings.py b/trcli/settings.py index 7cd59dc..a4af4d1 100644 --- a/trcli/settings.py +++ b/trcli/settings.py @@ -1,6 +1,6 @@ MAX_WORKERS_ADD_CASE = 10 MAX_WORKERS_ADD_RESULTS = 10 -DEFAULT_API_CALL_RETRIES = 3 -DEFAULT_API_CALL_TIMEOUT = 30 +DEFAULT_API_CALL_RETRIES = 5 +DEFAULT_API_CALL_TIMEOUT = 60 DEFAULT_BATCH_SIZE = 50 ALLOW_ELAPSED_MS = False From b01ae0c600800487569cfdd7aeb56835828921bd Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 11 Nov 2025 17:56:24 +0800 Subject: [PATCH 04/10] TRCLI-203 Updated name and property case validation also added unit tests --- ...quest_handler_name_matcher_optimization.py | 425 ++++++++++++++++++ trcli/api/api_request_handler.py | 84 +++- 2 files changed, 503 insertions(+), 6 deletions(-) create mode 100644 tests/test_api_request_handler_name_matcher_optimization.py diff --git a/tests/test_api_request_handler_name_matcher_optimization.py b/tests/test_api_request_handler_name_matcher_optimization.py new file mode 100644 index 0000000..584541d --- /dev/null +++ b/tests/test_api_request_handler_name_matcher_optimization.py @@ -0,0 +1,425 @@ +""" +Unit tests for NAME matcher optimization that skips fetching all cases. + +Tests the performance optimization introduced to avoid downloading 165k+ cases +when using NAME or PROPERTY matcher, which only need to validate specific case IDs. +""" + +import pytest +from unittest.mock import patch, MagicMock, call +from pathlib import Path +import json +from serde.json import from_json + +from tests.helpers.api_client_helpers import TEST_RAIL_URL, create_url +from trcli.cli import Environment +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.api.api_client import APIClient, APIClientResult +from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailSection, TestRailCase, TestRailResult +from trcli.data_classes.data_parsers import MatchersParser + + +@pytest.fixture +def environment(): + """Create test environment""" + env = Environment() + env.project = "Test Project" + env.batch_size = 10 + return env + + +@pytest.fixture +def api_client(): + """Create test API client""" + return APIClient(host_name=TEST_RAIL_URL) + + +def create_test_suite_with_case_ids(num_cases=10): + """Helper to create test suite with specified number of cases with case IDs""" + test_cases = [] + for i in range(1, num_cases + 1): + test_case = TestRailCase( + case_id=i, + title=f"Test case {i}", + section_id=1, + result=TestRailResult(case_id=i, comment=f"Test result {i}", elapsed="1s", status_id=1), + ) + test_cases.append(test_case) + + section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=test_cases) + + return TestRailSuite(name="Test Suite", suite_id=1, testsections=[section]) + + +def create_test_suite_with_missing_case_ids(total_cases=10, missing_count=3): + """Helper to create test suite with some cases missing IDs""" + test_cases = [] + for i in range(1, total_cases + 1): + # First `missing_count` cases don't have case_id + case_id = None if i <= missing_count else i + test_case = TestRailCase( + case_id=case_id, + title=f"Test case {i}", + section_id=1, + result=TestRailResult(case_id=case_id, comment=f"Test result {i}", elapsed="1s", status_id=1), + ) + test_cases.append(test_case) + + section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=test_cases) + + 
return TestRailSuite(name="Test Suite", suite_id=1, testsections=[section]) + + +class TestNameMatcherOptimization: + """Test suite for NAME matcher performance optimizations""" + + @pytest.mark.api_handler + def test_name_matcher_skips_bulk_case_fetch(self, environment, api_client, mocker): + """ + Test that NAME matcher does NOT fetch all cases from TestRail. + This is the key optimization - we should skip the expensive get_all_cases call. + """ + # Setup: NAME matcher with 100 test cases + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=100) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock the get_all_cases method to track if it's called + mock_get_all_cases = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=([], None) + ) + + # Mock validation to return all IDs as valid (skip actual validation) + mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 101)) + ) + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: get_all_cases should NOT have been called for NAME matcher + mock_get_all_cases.assert_not_called() + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_auto_matcher_still_fetches_all_cases(self, environment, api_client, mocker): + """ + Test that AUTO matcher STILL fetches all cases (required for automation ID lookup). + This ensures we didn't break the AUTO matcher functionality. + """ + # Setup: AUTO matcher + environment.case_matcher = MatchersParser.AUTO + test_suite = create_test_suite_with_case_ids(num_cases=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_all_cases to return some cases + mock_cases = [ + {"id": i, "custom_automation_id": f"test{i}", "title": f"Test {i}", "section_id": 1} for i in range(1, 11) + ] + mock_get_all_cases = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=(mock_cases, None) + ) + + mocker.patch.object(api_request_handler.data_provider, "update_data") + + # Execute + project_id = 1 + api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: get_all_cases SHOULD be called for AUTO matcher + mock_get_all_cases.assert_called_once_with(project_id, 1) + + @pytest.mark.api_handler + def test_name_matcher_skips_validation_for_large_batches(self, environment, api_client, mocker): + """ + Test that validation is SKIPPED when: + - Using NAME matcher + - All tests have case IDs (no missing) + - More than 1000 case IDs (large batch) + """ + # Setup: NAME matcher with 2000 test cases (> 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation method to track if it's called + mock_validate = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 2001)) + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation should be SKIPPED for large batches + mock_validate.assert_not_called() + + # Should log that validation was 
skipped + skip_log_calls = [call for call in mock_log.call_args_list if "Skipping validation" in str(call)] + assert len(skip_log_calls) > 0, "Should log that validation was skipped" + + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_validates_small_batches(self, environment, api_client, mocker): + """ + Test that validation RUNS when: + - Using NAME matcher + - Less than 1000 case IDs (small batch) + """ + # Setup: NAME matcher with 500 test cases (< 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=500) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation method to track if it's called + mock_validate = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 501)) + ) + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation SHOULD run for small batches + mock_validate.assert_called_once() + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_validates_when_missing_case_ids(self, environment, api_client, mocker): + """ + Test that validation RUNS when: + - Using NAME matcher + - Some tests are missing case IDs (even if total > 1000) + This is important because missing IDs might indicate extraction errors. + """ + # Setup: 1500 total cases, 3 missing IDs + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_missing_case_ids(total_cases=1500, missing_count=3) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(4, 1501)), # Exclude the 3 missing + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation SHOULD run because there are missing case IDs + mock_validate.assert_called_once() + + # Should log that missing cases were found + missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] + assert len(missing_log_calls) > 0, "Should log missing case IDs" + + assert missing_ids, "Should have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_detects_nonexistent_case_ids(self, environment, api_client, mocker): + """ + Test that NAME matcher correctly detects case IDs that don't exist in TestRail. 
+ """ + # Setup: Test suite with case IDs 1-10 + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation: Only IDs 1-5 exist, 6-10 don't exist + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(1, 6)), # Only 1-5 exist + ) + + mock_elog = mocker.patch.object(environment, "elog") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should detect nonexistent IDs + mock_validate.assert_called_once() + mock_elog.assert_called_once() + + # Check error message contains nonexistent IDs + error_call = mock_elog.call_args[0][0] + assert "Nonexistent case IDs" in error_call + assert "6" in error_call or "7" in error_call # At least some of the missing IDs + + assert not missing_ids, "missing_ids refers to tests without IDs in report" + assert error != "", "Should have error about nonexistent IDs" + + +class TestValidateCaseIdsExist: + """Test the __validate_case_ids_exist helper method""" + + @pytest.mark.api_handler + def test_validate_empty_list(self, environment, api_client): + """Test that empty list returns empty set""" + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[]) + + assert result == set(), "Empty list should return empty set" + + @pytest.mark.api_handler + def test_validate_small_batch_sequential(self, environment, api_client, requests_mock): + """ + Test validation of small batch (<=50 cases) uses sequential validation. + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_case responses for IDs 1-10 + for i in range(1, 11): + requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"}) + + # Add one non-existent case (returns 404) + requests_mock.get(create_url("get_case/999"), status_code=404, json={"error": "Not found"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist( + suite_id=1, case_ids=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999] + ) + + # Should return 1-10 (11 total requested, 1 doesn't exist) + assert result == set(range(1, 11)), "Should validate existing cases" + assert 999 not in result, "Non-existent case should not be in result" + + @pytest.mark.api_handler + def test_validate_large_batch_concurrent(self, environment, api_client, requests_mock): + """ + Test validation of large batch (>50 cases) uses concurrent validation. 
+ """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock 100 case responses + for i in range(1, 101): + requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist( + suite_id=1, case_ids=list(range(1, 101)) + ) + + # Should validate all 100 cases concurrently + assert result == set(range(1, 101)), "Should validate all cases" + assert len(result) == 100 + + @pytest.mark.api_handler + def test_validate_filters_wrong_suite(self, environment, api_client, requests_mock): + """ + Test that validation filters out cases belonging to different suite. + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Case 1 belongs to suite 1 (correct) + requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"}) + + # Case 2 belongs to suite 2 (wrong suite) + requests_mock.get(create_url("get_case/2"), json={"id": 2, "suite_id": 2, "title": "Case 2"}) + + # Case 3 belongs to suite 1 (correct) + requests_mock.get(create_url("get_case/3"), json={"id": 3, "suite_id": 1, "title": "Case 3"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3]) + + # Should only return cases from suite 1 + assert result == {1, 3}, "Should filter out case from wrong suite" + assert 2 not in result, "Case from wrong suite should be excluded" + + @pytest.mark.api_handler + def test_validate_handles_api_errors(self, environment, api_client, requests_mock): + """ + Test that validation gracefully handles API errors (404, 500, etc). + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Case 1: Success + requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"}) + + # Case 2: 404 Not Found + requests_mock.get(create_url("get_case/2"), status_code=404, json={"error": "Not found"}) + + # Case 3: 500 Server Error + requests_mock.get(create_url("get_case/3"), status_code=500, json={"error": "Internal error"}) + + # Case 4: Success + requests_mock.get(create_url("get_case/4"), json={"id": 4, "suite_id": 1, "title": "Case 4"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3, 4]) + + # Should return only successful cases + assert result == {1, 4}, "Should only return successfully validated cases" + + +class TestPerformanceComparison: + """Tests demonstrating the performance improvement""" + + @pytest.mark.api_handler + def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker): + """ + Demonstrate that NAME matcher makes fewer API calls than AUTO matcher. + This is a documentation test showing the optimization benefit. 
+ """ + test_suite = create_test_suite_with_case_ids(num_cases=2000) + + # Test AUTO matcher (old way) + environment.case_matcher = MatchersParser.AUTO + api_request_handler_auto = ApiRequestHandler(environment, api_client, test_suite) + + mock_get_all_cases_auto = mocker.patch.object( + api_request_handler_auto, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i, "custom_automation_id": f"test{i}"} for i in range(1, 2001)], None), + ) + mocker.patch.object(api_request_handler_auto.data_provider, "update_data") + + api_request_handler_auto.check_missing_test_cases_ids(project_id=1) + + # AUTO matcher should call get_all_cases + assert mock_get_all_cases_auto.call_count == 1, "AUTO matcher fetches all cases" + + # Test NAME matcher (new optimized way) + environment.case_matcher = MatchersParser.NAME + test_suite_name = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler_name = ApiRequestHandler(environment, api_client, test_suite_name) + + mock_get_all_cases_name = mocker.patch.object( + api_request_handler_name, "_ApiRequestHandler__get_all_cases", return_value=([], None) + ) + + # Mock validation to skip it (large batch) + mocker.patch.object(environment, "log") + + api_request_handler_name.check_missing_test_cases_ids(project_id=1) + + # NAME matcher should NOT call get_all_cases + mock_get_all_cases_name.assert_not_called() + + print("\n" + "=" * 60) + print("PERFORMANCE COMPARISON") + print("=" * 60) + print(f"AUTO matcher: {mock_get_all_cases_auto.call_count} get_all_cases calls") + print(f"NAME matcher: {mock_get_all_cases_name.call_count} get_all_cases calls") + print(f"Improvement: {mock_get_all_cases_auto.call_count - mock_get_all_cases_name.call_count} fewer calls") + print("=" * 60) + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "-s"]) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index fa1095f..83a55e6 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -307,9 +307,14 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: """ missing_cases_number = 0 suite_id = self.suites_data_from_provider.suite_id - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message + + # Performance optimization: Only fetch all cases if using AUTO matcher + # NAME/PROPERTY matchers can validate case IDs individually + if self.environment.case_matcher == MatchersParser.AUTO: + returned_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return False, error_message + if self.environment.case_matcher == MatchersParser.AUTO: test_cases_by_aut_id = {} for case in returned_cases: @@ -337,16 +342,36 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: if missing_cases_number: self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") else: + # For NAME or PROPERTY matcher we validate case IDs individually nonexistent_ids = [] - all_case_ids = [case["id"] for case in returned_cases] + case_ids_to_validate = set() + + # Collect all unique case IDs that need validation for section in self.suites_data_from_provider.testsections: for test_case in section.testcases: if not test_case.case_id: missing_cases_number += 1 - elif int(test_case.case_id) not in all_case_ids: - nonexistent_ids.append(test_case.case_id) + else: + case_ids_to_validate.add(int(test_case.case_id)) + if missing_cases_number: 
self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") + + # Validate case IDs exist in TestRail (batch validation for efficiency) + # Skip validation if all tests have case IDs and set is large (1000+ cases) + should_validate = len(case_ids_to_validate) < 1000 or missing_cases_number > 0 + + if case_ids_to_validate and should_validate: + self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") + validated_ids = self.__validate_case_ids_exist(suite_id, list(case_ids_to_validate)) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] + elif case_ids_to_validate and not should_validate: + self.environment.log( + f"Skipping validation of {len(case_ids_to_validate)} case IDs (all tests have IDs, trusting they exist). " + f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." + ) + nonexistent_ids = [] + if nonexistent_ids: self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") return False, "Case IDs not in TestRail project or suite were detected in the report file." @@ -999,6 +1024,53 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ else: return [], response.error_message + def __validate_case_ids_exist(self, suite_id: int, case_ids: List[int]) -> set: + """ + Validate that case IDs exist in TestRail without fetching all cases. + Returns set of valid case IDs. + + :param suite_id: Suite ID + :param case_ids: List of case IDs to validate + :returns: Set of case IDs that exist in TestRail + """ + if not case_ids: + return set() + + valid_ids = set() + + # For large numbers of case IDs, use concurrent validation + if len(case_ids) > 50: + from concurrent.futures import ThreadPoolExecutor, as_completed + + def check_case_exists(case_id): + """Check if a single case exists""" + response = self.client.send_get(f"get_case/{case_id}") + if response.status_code == 200 and not response.error_message: + # Verify case belongs to correct project/suite + case_data = response.response_text + if case_data.get("suite_id") == suite_id: + return case_id + return None + + # Use 10 concurrent workers to validate IDs + with ThreadPoolExecutor(max_workers=10) as executor: + futures = {executor.submit(check_case_exists, cid): cid for cid in case_ids} + + for future in as_completed(futures): + result = future.result() + if result is not None: + valid_ids.add(result) + else: + # For small sets, validate sequentially + for case_id in case_ids: + response = self.client.send_get(f"get_case/{case_id}") + if response.status_code == 200 and not response.error_message: + case_data = response.response_text + if case_data.get("suite_id") == suite_id: + valid_ids.add(case_id) + + return valid_ids + # Label management methods def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: """ From 5407833d84926e3ceb04aa0301faa34411a5f373 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 17 Nov 2025 15:40:11 +0800 Subject: [PATCH 05/10] TRCLI-203 Added parallel pagination feature, also updated unit tests --- ... 
test_api_request_handler_case_matcher.py} | 166 ++++++++- trcli/api/api_request_handler.py | 316 ++++++++++++++++-- trcli/cli.py | 41 +-- trcli/settings.py | 4 +- 4 files changed, 463 insertions(+), 64 deletions(-) rename tests/{test_api_request_handler_name_matcher_optimization.py => test_api_request_handler_case_matcher.py} (72%) diff --git a/tests/test_api_request_handler_name_matcher_optimization.py b/tests/test_api_request_handler_case_matcher.py similarity index 72% rename from tests/test_api_request_handler_name_matcher_optimization.py rename to tests/test_api_request_handler_case_matcher.py index 584541d..6d4bb2f 100644 --- a/tests/test_api_request_handler_name_matcher_optimization.py +++ b/tests/test_api_request_handler_case_matcher.py @@ -192,23 +192,89 @@ def test_name_matcher_validates_small_batches(self, environment, api_client, moc assert error == "", "Should not have errors" @pytest.mark.api_handler - def test_name_matcher_validates_when_missing_case_ids(self, environment, api_client, mocker): + def test_name_matcher_fetches_all_cases_for_large_report_with_missing_ids(self, environment, api_client, mocker): """ - Test that validation RUNS when: + Test that for large reports with missing IDs, we FETCH ALL CASES instead of individual validation. + This is the new optimized behavior: - Using NAME matcher - - Some tests are missing case IDs (even if total > 1000) - This is important because missing IDs might indicate extraction errors. + - Large report (>=1000 total cases) + - Some tests are missing case IDs + + Strategy: Fetch all cases once (e.g., 660 calls for 165k cases) is more efficient than + individual validation (e.g., 1500 calls for 1500 cases in report). """ - # Setup: 1500 total cases, 3 missing IDs + # Setup: 1500 total cases, 3 missing IDs (total >= 1000 threshold) environment.case_matcher = MatchersParser.NAME test_suite = create_test_suite_with_missing_case_ids(total_cases=1500, missing_count=3) api_request_handler = ApiRequestHandler(environment, api_client, test_suite) - # Mock validation + # Mock get_all_cases to return all case IDs 4-1500 (cases 1-3 don't exist, matching missing IDs) + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i} for i in range(4, 1501)], None), + ) + + # Mock individual validation - should NOT be called for large reports + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(4, 1501)), + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should FETCH ALL CASES for large reports with missing IDs + mock_get_all_cases.assert_called_once_with(project_id, 1) + + # Should NOT use individual validation + mock_validate.assert_not_called() + + # Should log that it's using fetch-all strategy + fetch_log_calls = [call for call in mock_log.call_args_list if "Fetching all cases" in str(call)] + assert len(fetch_log_calls) > 0, "Should log that fetch-all strategy is being used" + + # Should log that missing cases were found + missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] + assert len(missing_log_calls) > 0, "Should log missing case IDs" + + assert missing_ids, "Should have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def 
test_name_matcher_validates_individually_for_small_report_with_missing_ids( + self, environment, api_client, mocker + ): + """ + Test that for small reports with missing IDs, we use INDIVIDUAL validation. + - Using NAME matcher + - Small report (<1000 total cases) + - Some tests are missing case IDs + + Strategy: Individual validation (e.g., 500 calls) is more efficient than + fetch all (e.g., 660 calls for 165k cases). + """ + # Setup: 500 total cases, 10 missing IDs (total < 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_missing_case_ids(total_cases=500, missing_count=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock individual validation mock_validate = mocker.patch.object( api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", - return_value=set(range(4, 1501)), # Exclude the 3 missing + return_value=set(range(11, 501)), # Exclude the 10 missing (1-10) + ) + + # Mock get_all_cases - should NOT be called for small reports + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([], None), ) mock_log = mocker.patch.object(environment, "log") @@ -217,9 +283,12 @@ def test_name_matcher_validates_when_missing_case_ids(self, environment, api_cli project_id = 1 missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) - # Assert: Validation SHOULD run because there are missing case IDs + # Assert: Should use INDIVIDUAL validation for small reports mock_validate.assert_called_once() + # Should NOT fetch all cases + mock_get_all_cases.assert_not_called() + # Should log that missing cases were found missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] assert len(missing_log_calls) > 0, "Should log missing case IDs" @@ -376,12 +445,13 @@ def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker) """ Demonstrate that NAME matcher makes fewer API calls than AUTO matcher. This is a documentation test showing the optimization benefit. 
- """ - test_suite = create_test_suite_with_case_ids(num_cases=2000) - # Test AUTO matcher (old way) + Scenario: Large report with all case IDs present (best case for NAME matcher) + """ + # Test AUTO matcher (always fetches all cases) environment.case_matcher = MatchersParser.AUTO - api_request_handler_auto = ApiRequestHandler(environment, api_client, test_suite) + test_suite_auto = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler_auto = ApiRequestHandler(environment, api_client, test_suite_auto) mock_get_all_cases_auto = mocker.patch.object( api_request_handler_auto, @@ -395,22 +465,31 @@ def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker) # AUTO matcher should call get_all_cases assert mock_get_all_cases_auto.call_count == 1, "AUTO matcher fetches all cases" - # Test NAME matcher (new optimized way) - environment.case_matcher = MatchersParser.NAME + # Test NAME matcher with all IDs present (best case - skips validation) + env_name = Environment() + env_name.project = "Test Project" + env_name.batch_size = 10 + env_name.case_matcher = MatchersParser.NAME + test_suite_name = create_test_suite_with_case_ids(num_cases=2000) - api_request_handler_name = ApiRequestHandler(environment, api_client, test_suite_name) + api_request_handler_name = ApiRequestHandler(env_name, api_client, test_suite_name) mock_get_all_cases_name = mocker.patch.object( api_request_handler_name, "_ApiRequestHandler__get_all_cases", return_value=([], None) ) - # Mock validation to skip it (large batch) - mocker.patch.object(environment, "log") + mock_validate_name = mocker.patch.object( + api_request_handler_name, "_ApiRequestHandler__validate_case_ids_exist", return_value=set() + ) + + mocker.patch.object(env_name, "log") api_request_handler_name.check_missing_test_cases_ids(project_id=1) - # NAME matcher should NOT call get_all_cases + # NAME matcher should NOT call get_all_cases when all IDs present and report >= 1000 mock_get_all_cases_name.assert_not_called() + # Should also not call individual validation + mock_validate_name.assert_not_called() print("\n" + "=" * 60) print("PERFORMANCE COMPARISON") @@ -420,6 +499,57 @@ def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker) print(f"Improvement: {mock_get_all_cases_auto.call_count - mock_get_all_cases_name.call_count} fewer calls") print("=" * 60) + @pytest.mark.api_handler + def test_performance_name_matcher_with_missing_ids(self, environment, api_client, mocker): + """ + Demonstrate smart strategy selection for NAME matcher with large reports containing missing IDs. 
+ + Scenario: 5000 cases in report, 100 missing IDs + - Individual validation: 5000 API calls + - Fetch all + validate locally: ~660 API calls (for 165k cases in TestRail) + Strategy: Fetch all is more efficient + """ + env = Environment() + env.project = "Test Project" + env.batch_size = 10 + env.case_matcher = MatchersParser.NAME + + # 5000 cases, 100 missing IDs + test_suite = create_test_suite_with_missing_case_ids(total_cases=5000, missing_count=100) + api_request_handler = ApiRequestHandler(env, api_client, test_suite) + + # Mock get_all_cases to simulate fetching 165k cases + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i} for i in range(101, 5001)], None), # Cases 101-5000 exist + ) + + # Mock individual validation - should NOT be called + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(101, 5001)), + ) + + mocker.patch.object(env, "log") + + api_request_handler.check_missing_test_cases_ids(project_id=1) + + # Should use fetch-all strategy (more efficient for large reports) + mock_get_all_cases.assert_called_once() + mock_validate.assert_not_called() + + print("\n" + "=" * 60) + print("LARGE REPORT WITH MISSING IDS") + print("=" * 60) + print(f"Report size: 5000 cases, 100 missing IDs") + print(f"Strategy chosen: Fetch all cases") + print(f"API calls: 1 fetch (simulates ~660 paginated calls)") + print(f"Alternative: 4900 individual validation calls") + print(f"Efficiency: ~7.4x fewer calls") + print("=" * 60) + if __name__ == "__main__": pytest.main([__file__, "-v", "-s"]) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 83a55e6..7bca425 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,4 +1,5 @@ import html, json +import time from concurrent.futures import ThreadPoolExecutor, as_completed from beartype.typing import List, Union, Tuple, Dict @@ -14,7 +15,12 @@ from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData from trcli.data_providers.api_data_provider import ApiDataProvider -from trcli.settings import MAX_WORKERS_ADD_RESULTS, MAX_WORKERS_ADD_CASE +from trcli.settings import ( + MAX_WORKERS_ADD_RESULTS, + MAX_WORKERS_ADD_CASE, + ENABLE_PARALLEL_PAGINATION, + MAX_WORKERS_PARALLEL_PAGINATION, +) class ApiRequestHandler: @@ -342,7 +348,7 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: if missing_cases_number: self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") else: - # For NAME or PROPERTY matcher we validate case IDs individually + # For NAME or PROPERTY matcher we validate case IDs nonexistent_ids = [] case_ids_to_validate = set() @@ -354,27 +360,60 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: else: case_ids_to_validate.add(int(test_case.case_id)) + total_tests_in_report = missing_cases_number + len(case_ids_to_validate) + if missing_cases_number: self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") - # Validate case IDs exist in TestRail (batch validation for efficiency) - # Skip validation if all tests have case IDs and set is large (1000+ cases) - should_validate = len(case_ids_to_validate) < 1000 or missing_cases_number > 0 - - if case_ids_to_validate and should_validate: - 
self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") - validated_ids = self.__validate_case_ids_exist(suite_id, list(case_ids_to_validate)) - nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] - elif case_ids_to_validate and not should_validate: - self.environment.log( - f"Skipping validation of {len(case_ids_to_validate)} case IDs (all tests have IDs, trusting they exist). " - f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." - ) - nonexistent_ids = [] + # Smart validation strategy based on report size + # Threshold: 1000 cases (same as skip validation threshold for consistency) + if case_ids_to_validate: + # Skip validation for large reports with all IDs (most efficient) + if missing_cases_number == 0 and total_tests_in_report >= 1000: + # All tests have IDs and report is large: Skip validation (trust IDs) + self.environment.log( + f"Skipping validation of {len(case_ids_to_validate)} case IDs " + f"(all tests have IDs, trusting they exist). " + f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." + ) + nonexistent_ids = [] + + # Fetch all for large reports with missing IDs + elif total_tests_in_report >= 1000: + # Large report (>=1000 cases) with some missing IDs: Fetch all cases and validate locally + # This is more efficient than individual validation for large batches + self.environment.log( + f"Large report detected ({total_tests_in_report} cases). " + f"Fetching all cases from TestRail for efficient validation..." + ) + returned_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return False, error_message - if nonexistent_ids: - self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") - return False, "Case IDs not in TestRail project or suite were detected in the report file." + # Build lookup dictionary from fetched cases + all_case_ids = {case["id"] for case in returned_cases} + + # Validate locally (O(1) lookup) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in all_case_ids] + + if nonexistent_ids: + self.environment.elog( + f"Nonexistent case IDs found in the report file: {nonexistent_ids[:20]}" + f"{' ...' if len(nonexistent_ids) > 20 else ''}" + ) + return False, "Case IDs not in TestRail project or suite were detected in the report file." + + # Individual validation for small reports + else: + # Small report (<1000 cases): Use individual validation + # This is more efficient for small batches + self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") + validated_ids = self.__validate_case_ids_exist(suite_id, list(case_ids_to_validate)) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] + + if nonexistent_ids: + self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") + return False, "Case IDs not in TestRail project or suite were detected in the report file." return missing_cases_number > 0, "" @@ -1002,7 +1041,18 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ Get all entities from all pages if number of entities is too big to return in single response. Function using next page field in API response. Entity examples: cases, sections + + If ENABLE_PARALLEL_PAGINATION is True or --parallel-pagination flag is set, + will use parallel fetching for better performance. 
""" + # Check if parallel pagination is enabled (CLI flag takes precedence) + parallel_enabled = getattr(self.environment, "parallel_pagination", False) or ENABLE_PARALLEL_PAGINATION + + # Use parallel pagination if enabled and this is the first call (entities is empty) + if parallel_enabled and not entities: + return self.__get_all_entities_parallel(entity, link) + + # Otherwise use sequential pagination (original implementation) if link.startswith(self.suffix): link = link.replace(self.suffix, "") response = self.client.send_get(link) @@ -1024,6 +1074,234 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ else: return [], response.error_message + def __get_all_entities_parallel(self, entity: str, link: str) -> Tuple[List[Dict], str]: + """ + Parallel version of __get_all_entities for faster pagination. + Fetches multiple pages concurrently using ThreadPoolExecutor. + + :param entity: Entity type (cases, sections, etc.) + :param link: Initial API link + :returns: Tuple of (all entities list, error message) + """ + fetch_start_time = time.time() + + if link.startswith(self.suffix): + link = link.replace(self.suffix, "") + + # Step 1: Fetch first page to get metadata + self.environment.log(f"Fetching first page to determine total pages...") + response = self.client.send_get(link) + + if response.error_message: + return [], response.error_message + + # Handle non-paginated responses (legacy endpoints) + if isinstance(response.response_text, list): + return response.response_text, response.error_message + + if isinstance(response.response_text, str): + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) + return [], error_msg + + # Collect first page results + all_entities = response.response_text[entity] + first_page_count = len(all_entities) + + # Check if there are more pages + if response.response_text["_links"]["next"] is None: + # Only one page, return immediately + fetch_time = time.time() - fetch_start_time + self.environment.log(f"Single page fetch completed in {fetch_time:.1f}s") + return all_entities, response.error_message + + # Step 2: Calculate total pages needed + # TestRail pagination uses limit parameter (default 250) + # We need to parse the next link to understand pagination structure + next_link = response.response_text["_links"]["next"] + + # Extract offset/limit from the link to calculate total pages + import re + from urllib.parse import urlparse, parse_qs + + # Parse the next link to get offset and limit + parsed = urlparse(next_link) + query_params = parse_qs(parsed.query) + + # Get limit (page size) - default to 250 if not found + limit = int(query_params.get("limit", [250])[0]) + if limit == 0: + limit = 250 + + # Get offset from next link + next_offset = int(query_params.get("offset", [limit])[0]) + + # Step 3: Fetch pages in parallel with dynamic offset generation + # Build base link without offset parameter + # TestRail API uses '&' as separator (e.g., get_cases/123&suite_id=2&offset=250) + base_link = link.split("&offset=")[0].split("?offset=")[0] + + self.environment.log( + f"Starting parallel fetch: first page has {first_page_count} {entity}, " + f"fetching remaining pages with {MAX_WORKERS_PARALLEL_PAGINATION} workers..." + ) + + def fetch_page(offset): + """Fetch a single page by offset""" + # TestRail always uses '&' as separator, not '?' 
+ page_link = f"{base_link}&offset={offset}&limit={limit}" + page_response = self.client.send_get(page_link) + + if page_response.error_message: + return None, page_response.error_message + + if isinstance(page_response.response_text, dict) and entity in page_response.response_text: + page_data = page_response.response_text[entity] + # Return empty list if this page has no data (we've reached the end) + if not page_data: + return [], None + return page_data, None + else: + return None, "Invalid response format" + + # Fetch pages in parallel with intelligent batching to avoid overwhelming server + error_message = "" + pages_fetched = 1 # We already have the first page + + # Use batching: submit batches of pages, check results, submit next batch + # This prevents overwhelming the server with 10k requests at once + batch_size = 100 # Submit 100 pages at a time + current_page_index = 0 + max_pages = 10000 # Safety cap + consecutive_empty_pages = 0 + max_consecutive_empty = 10 # Stop after 10 consecutive empty pages + + with ThreadPoolExecutor(max_workers=MAX_WORKERS_PARALLEL_PAGINATION) as executor: + should_continue = True + + while should_continue and current_page_index < max_pages: + # Submit next batch of pages + futures = {} + batch_offsets = [] + + for i in range(batch_size): + if current_page_index + i >= max_pages: + break + offset = next_offset + ((current_page_index + i) * limit) + batch_offsets.append(offset) + future = executor.submit(fetch_page, offset) + futures[future] = offset + + if not futures: + break + + # Process this batch + batch_had_data = False + for future in as_completed(futures): + offset = futures[future] + try: + page_data, page_error = future.result() + + if page_error: + error_message = page_error + self.environment.elog(f"Error fetching page at offset {offset}: {page_error}") + should_continue = False + # Cancel remaining futures in this batch + for f in futures: + if not f.done(): + f.cancel() + break + + if page_data is None: + # Error occurred + error_message = "Invalid response format" + should_continue = False + # Cancel remaining + for f in futures: + if not f.done(): + f.cancel() + break + + if len(page_data) == 0: + # Empty page + consecutive_empty_pages += 1 + if consecutive_empty_pages >= max_consecutive_empty: + # We've hit enough empty pages, stop fetching + self.environment.log(f"Reached end of data after {consecutive_empty_pages} empty pages") + should_continue = False + # Cancel remaining futures in this batch + for f in futures: + if not f.done(): + f.cancel() + break + else: + # Got data - reset consecutive empty counter + consecutive_empty_pages = 0 + batch_had_data = True + + # Add results to our collection + all_entities.extend(page_data) + pages_fetched += 1 + + # Log progress every 50 pages + if pages_fetched % 50 == 0: + self.environment.log( + f"Fetched {pages_fetched} pages, {len(all_entities)} {entity} so far..." 
+ ) + + except Exception as ex: + error_message = f"Exception during parallel fetch: {str(ex)}" + self.environment.elog(error_message) + should_continue = False + # Cancel remaining + for f in futures: + if not f.done(): + f.cancel() + break + + # Move to next batch + current_page_index += batch_size + + # If this batch had no data at all, we've likely reached the end + if not batch_had_data and consecutive_empty_pages > 0: + should_continue = False + + fetch_time = time.time() - fetch_start_time + + if error_message: + self.environment.elog(f"Parallel fetch failed after {fetch_time:.1f}s, falling back to sequential...") + # Fall back to sequential fetch + return self.__get_all_entities_sequential(entity, link, []) + + self.environment.log( + f"Parallel fetch completed: {len(all_entities)} {entity} in {fetch_time:.1f}s " + f"(~{len(all_entities) / fetch_time:.0f} items/sec)" + ) + + return all_entities, "" + + def __get_all_entities_sequential(self, entity: str, link: str, entities: List[Dict]) -> Tuple[List[Dict], str]: + """ + Sequential fallback for __get_all_entities (original implementation). + This is kept separate for fallback purposes. + """ + if link.startswith(self.suffix): + link = link.replace(self.suffix, "") + response = self.client.send_get(link) + if not response.error_message: + if isinstance(response.response_text, list): + return response.response_text, response.error_message + if isinstance(response.response_text, str): + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) + return [], error_msg + entities = entities + response.response_text[entity] + if response.response_text["_links"]["next"] is not None: + next_link = response.response_text["_links"]["next"].replace("limit=0", "limit=250") + return self.__get_all_entities_sequential(entity, link=next_link, entities=entities) + else: + return entities, response.error_message + else: + return [], response.error_message + def __validate_case_ids_exist(self, suite_id: int, case_ids: List[int]) -> set: """ Validate that case IDs exist in TestRail without fetching all cases. 
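The parallel pagination added above boils down to offset arithmetic: fetch the first page, and if a next link exists, fan the remaining offsets out across a thread pool in batches instead of walking the next links one by one. A simplified sketch of that idea under stated assumptions: fetch_json is a placeholder for an HTTP GET returning TestRail-style paginated JSON ({"cases": [...], "_links": {"next": ...}}) and is not trcli code; the real handler additionally derives limit/offset from the first response's next link, tolerates a few consecutive empty pages, and falls back to the sequential path on errors.

```python
# Simplified sketch of batched, offset-based parallel pagination. fetch_json is
# an assumed placeholder for an HTTP GET returning TestRail-style paginated JSON.
# TestRail's legacy URL format uses '&' before parameters, as noted above.
from concurrent.futures import ThreadPoolExecutor

def fetch_all_cases(fetch_json, base_link, limit=250, max_workers=10, batch_size=50):
    first = fetch_json(f"{base_link}&limit={limit}&offset=0")
    items = list(first["cases"])
    if first["_links"]["next"] is None:
        return items  # single page, nothing to parallelise

    offset = limit
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        while True:
            # Submit one batch of offsets at a time so we can stop at the end of
            # the data instead of flooding the server with speculative requests.
            offsets = [offset + i * limit for i in range(batch_size)]
            pages = list(pool.map(
                lambda off: fetch_json(f"{base_link}&offset={off}&limit={limit}")["cases"],
                offsets,
            ))
            for page in pages:
                items.extend(page)
            if any(not page for page in pages):
                break  # an empty page means we ran past the last case
            offset += batch_size * limit
    return items
```

In the hunks that follow, the feature stays opt-in: ENABLE_PARALLEL_PAGINATION remains False in trcli/settings.py, and the new --parallel-pagination flag enables it for a single invocation.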
diff --git a/trcli/cli.py b/trcli/cli.py index 155e1e6..6fa281b 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -77,6 +77,7 @@ def __init__(self, cmd="parse_junit"): self.assign_failed_to = None # Add proxy related attributes self.noproxy = None self.proxy_user = None + self.parallel_pagination = None @property def case_fields(self): @@ -90,7 +91,7 @@ def case_fields(self, case_fields: Union[List[str], dict]): exit(1) self._case_fields = fields_dict - @property + @property def result_fields(self): return self._result_fields @@ -202,18 +203,11 @@ def parse_params_from_config_file(self, file_path: Path): for page_content in file_content: if page_content: self.params_from_config.update(page_content) - if ( - self.params_from_config.get("config") is not None - and self.default_config_file - ): + if self.params_from_config.get("config") is not None and self.default_config_file: self.default_config_file = False - self.parse_params_from_config_file( - self.params_from_config["config"] - ) + self.parse_params_from_config_file(self.params_from_config["config"]) except (yaml.YAMLError, ValueError, TypeError) as e: - self.elog( - FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path) - ) + self.elog(FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path)) self.elog(f"Error details:\n{e}") if not self.default_config_file: exit(1) @@ -280,10 +274,13 @@ def main(self, *args, **kwargs): ) @click.option("-u", "--username", type=click.STRING, metavar="", help="Username.") @click.option("-p", "--password", type=click.STRING, metavar="", help="Password.") -@click.option("-k", "--key", metavar="", help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.") @click.option( - "-v", "--verbose", is_flag=True, help="Output all API calls and their results." + "-k", + "--key", + metavar="", + help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.", ) +@click.option("-v", "--verbose", is_flag=True, help="Output all API calls and their results.") @click.option("--verify", is_flag=True, help="Verify the data was added correctly.") @click.option("--insecure", is_flag=True, help="Allow insecure requests.") @click.option( @@ -328,22 +325,14 @@ def main(self, *args, **kwargs): help="Silence stdout", default=False, ) +@click.option("--proxy", metavar="", help="Proxy address and port (e.g., http://proxy.example.com:8080).") +@click.option("--proxy-user", metavar="", help="Proxy username and password in the format 'username:password'.") @click.option( - "--proxy", - metavar="", - help="Proxy address and port (e.g., http://proxy.example.com:8080)." + "--noproxy", metavar="", help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." ) @click.option( - "--proxy-user", - metavar="", - help="Proxy username and password in the format 'username:password'." + "--parallel-pagination", is_flag=True, help="Enable parallel pagination for faster case fetching (experimental)." ) -@click.option( - "--noproxy", - metavar="", - help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." 
-) - def cli(environment: Environment, context: click.core.Context, *args, **kwargs): """TestRail CLI""" if not sys.argv[1:]: @@ -354,6 +343,6 @@ def cli(environment: Environment, context: click.core.Context, *args, **kwargs): if not context.invoked_subcommand: print(MISSING_COMMAND_SLOGAN) exit(2) - + environment.parse_config_file(context) environment.set_parameters(context) diff --git a/trcli/settings.py b/trcli/settings.py index a4af4d1..829af7d 100644 --- a/trcli/settings.py +++ b/trcli/settings.py @@ -1,6 +1,8 @@ MAX_WORKERS_ADD_CASE = 10 -MAX_WORKERS_ADD_RESULTS = 10 +MAX_WORKERS_ADD_RESULTS = 20 DEFAULT_API_CALL_RETRIES = 5 DEFAULT_API_CALL_TIMEOUT = 60 DEFAULT_BATCH_SIZE = 50 ALLOW_ELAPSED_MS = False +ENABLE_PARALLEL_PAGINATION = False +MAX_WORKERS_PARALLEL_PAGINATION = 10 From 2150e6a41623928fc5075651e16659b65f95a31f Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 20 Nov 2025 17:04:16 +0800 Subject: [PATCH 06/10] TRCLI-202 Updated file attachment handling and display errors for failed upload e.g. due to large file size --- CHANGELOG.MD | 3 +- tests/test_api_request_handler.py | 449 +++++++-------- trcli/api/api_request_handler.py | 913 ++++++++++++++++-------------- trcli/constants.py | 30 +- 4 files changed, 712 insertions(+), 683 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index f408950..bee7f26 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -8,10 +8,11 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb ## [1.12.5] -_released 11-03-2025 +_released 11-23-2025 ### Fixed - Fixed an issue where adding labels to project fails using label add command + - Fixed an issue where failed attachment upload errors (due to file size being too large) is not displayed in standard output. ## [1.12.4] diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py index 4f17c37..a093d22 100644 --- a/tests/test_api_request_handler.py +++ b/tests/test_api_request_handler.py @@ -23,9 +23,7 @@ def _make_handler(verify=False, custom_json=None): environment.batch_size = 10 environment.case_matcher = MatchersParser.AUTO if custom_json is None: - json_path = ( - Path(__file__).parent / "test_data/json/api_request_handler.json" - ) + json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" else: json_path = custom_json file_json = open(json_path) @@ -49,17 +47,13 @@ def api_request_handler_verify(handler_maker): @pytest.fixture(scope="function") def api_request_handler_update_case_json(handler_maker): - json_path = ( - Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" - ) + json_path = Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" yield handler_maker(custom_json=json_path, verify=False) class TestApiRequestHandler: @pytest.mark.api_handler - def test_return_project( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = { "offset": 0, "limit": 250, @@ -72,7 +66,7 @@ def test_return_project( {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, {"id": 3, "name": "DataHub", "suite_mode": 1}, - ] + ], } requests_mock.get(create_url("get_projects"), json=mocked_response) assert api_request_handler.get_project_data("Test Project") == ProjectData( @@ -107,9 +101,7 @@ def test_return_project( ), "Get project should return proper project data object" @pytest.mark.api_handler - def 
test_return_project_legacy_response( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_legacy_response(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = [ {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, @@ -131,15 +123,15 @@ def test_return_project_legacy_response_with_buggy_authentication_prefix( {"id": 3, "name": "DataHub", "suite_mode": 1}, ] - requests_mock.get(create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n"+json.dumps(mocked_response)) + requests_mock.get( + create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n" + json.dumps(mocked_response) + ) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=2, suite_mode=1, error_message="" ), "Get project should return proper project data object" @pytest.mark.api_handler - def test_check_suite_exists( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_check_suite_exists(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, @@ -155,7 +147,7 @@ def test_check_suite_exists( False, FAULT_MAPPING["missing_suite"].format(suite_id=6), ), "Given suite id should NOT exist in mocked response." - + @pytest.mark.api_handler def test_check_suite_exists_with_pagination(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 @@ -167,7 +159,7 @@ def test_check_suite_exists_with_pagination(self, api_request_handler: ApiReques "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) @@ -207,16 +199,13 @@ def test_add_suite(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in add_suite" assert ( - api_request_handler.suites_data_from_provider.suite_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.suite_id == mocked_response["id"] ), "Added suite id in DataProvider doesn't match mocked response id." 
@pytest.mark.api_handler - def test_check_missing_sections_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -225,25 +214,19 @@ def test_check_missing_sections_true( "suite_id": 4, "name": "Skipped test", } - ] + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) - update_data_mock.assert_called_with( - section_data=[{'section_id': 0, 'suite_id': 4, 'name': 'Skipped test'}] - ) + update_data_mock.assert_called_with(section_data=[{"section_id": 0, "suite_id": 4, "name": "Skipped test"}]) assert missing, "There should be missing section" @pytest.mark.api_handler - def test_check_missing_sections_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -256,19 +239,17 @@ def test_check_missing_sections_false( "id": 2, "suite_id": 4, "name": "Passed test", - } - ] + }, + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) update_data_mock.assert_called_with( section_data=[ - {'name': 'Skipped test', 'section_id': 1, 'suite_id': 4}, - {'name': 'Passed test', 'section_id': 2, 'suite_id': 4} + {"name": "Skipped test", "section_id": 1, "suite_id": 4}, + {"name": "Passed test", "section_id": 2, "suite_id": 4}, ] ) assert not missing, "There should be no missing section" @@ -282,9 +263,7 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc "name": "Passed test", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_sections(project_id) assert ( @@ -296,20 +275,17 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc assert error == "", "Error occurred in add_section" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.testsections[1].section_id == mocked_response["id"] ), "Added section id in DataProvider doesn't match mocked response id." 
@pytest.mark.api_handler - def test_add_section_and_cases( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, "suite_id": 4, "name": "Passed test", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_1 = { @@ -317,7 +293,7 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_2 = { @@ -325,12 +301,10 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -371,9 +345,7 @@ def test_add_run(self, api_request_handler: ApiRequestHandler, requests_mock): requests_mock.post(create_url(f"add_run/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_run(project_id, run_name) - assert ( - mocked_response["id"] == resources_added - ), "Added run id doesn't match mocked response id" + assert mocked_response["id"] == resources_added, "Added run id doesn't match mocked response id" assert error == "", "Error occurred in add_case" @pytest.mark.api_handler @@ -394,40 +366,37 @@ def test_add_results(self, api_request_handler: ApiRequestHandler, requests_mock "version": "1.0RC1", } ] - requests_mock.post( - create_url(f"add_results_for_cases/{run_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_results_for_cases/{run_id}"), json=mocked_response) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 4, - 'case_id': 1, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 4, + "case_id": 1, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) attachments_mock_response = {"attachment_id": 123} - requests_mock.post( - create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response - ) + requests_mock.post(create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response) with patch("builtins.open", mock_open()) as mock_file: resources_added, error, results_added = api_request_handler.add_results(run_id) assert [mocked_response] == resources_added, "Invalid response from add_results" assert error == "", "Error occurred in add_results" - assert results_added == len(mocked_response), \ - f"Expected {len(mocked_response)} results to be added but got {results_added} instead." + assert results_added == len( + mocked_response + ), f"Expected {len(mocked_response)} results to be added but got {results_added} instead." 
mock_file.assert_any_call("./path1", "rb") mock_file.assert_any_call("./path2", "rb") @@ -446,12 +415,10 @@ def test_close_run(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in close_run" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": None, @@ -459,16 +426,14 @@ def test_check_missing_test_cases_ids_true( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } requests_mock.get( create_url(f"get_cases/{project_id}&suite_id={suite_id}"), json=mocked_response_page_1, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ @@ -476,27 +441,24 @@ def test_check_missing_test_cases_ids_true( "case_id": 1, "custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, - "custom_automation_id": - "Skipped test.testCase2", + "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" - } + "title": "testCase2", + }, ] ) assert missing_ids, "There is one missing test case" assert error == "", "Error occurred in check" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": f"/api/v2/get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1", @@ -504,7 +466,7 @@ def test_check_missing_test_cases_ids_false( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } mocked_response_page_2 = { @@ -521,29 +483,22 @@ def test_check_missing_test_cases_ids_false( create_url(f"get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1"), json=mocked_response_page_2, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ { "case_id": 1, 
"custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" + "title": "testCase2", }, - { - "case_id": 1, - "custom_automation_id": "Passed test.testCase3", - "section_id": 2, - "title": "testCase3" - } + {"case_id": 1, "custom_automation_id": "Passed test.testCase3", "section_id": 2, "title": "testCase3"}, ] ) assert not missing_ids, "No missing ids" @@ -560,35 +515,30 @@ def test_get_suite_ids(self, api_request_handler: ApiRequestHandler, requests_mo requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.get_suite_ids(project_id) assert ( - resources_added[0] == mocked_response[0]["id"] and - resources_added[1] == mocked_response[1]["id"] + resources_added[0] == mocked_response[0]["id"] and resources_added[1] == mocked_response[1]["id"] ), "ID in response doesn't match mocked response" @pytest.mark.api_handler - def test_get_suite_ids_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_get_suite_ids_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) - + + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) + suite_ids, error = api_request_handler.get_suite_ids(project_id) - + assert suite_ids == [], "Should return empty list on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." 
+ ), "Should return connection error message" @pytest.mark.api_handler - def test_resolve_suite_id_using_name( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_resolve_suite_id_using_name(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_name = "Suite2" api_request_handler.suites_data_from_provider.name = suite_name - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "offset": 0, @@ -598,43 +548,36 @@ def test_resolve_suite_id_using_name( "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } - + requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) - + suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) - + assert suite_id == 5, "Should return the correct suite ID for matching name with pagination" assert error == "", "Should have no error message" - + update_data_mock.assert_called_once_with([{"suite_id": 5, "name": "Suite2"}]) @pytest.mark.api_handler - def test_resolve_suite_id_using_name_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_resolve_suite_id_using_name_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) assert suite_id == -1, "Should return -1 on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" - + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." + ), "Should return connection error message" @pytest.mark.api_handler - def test_return_project_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_error(self, api_request_handler: ApiRequestHandler, requests_mock): - requests_mock.get( - create_url("get_projects"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url("get_projects"), exc=requests.exceptions.ConnectTimeout) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=-3, suite_mode=-1, @@ -643,9 +586,7 @@ def test_return_project_error( ), "Get project should return proper project data object with error" @pytest.mark.api_handler - def test_add_suite_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_suite_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 api_request_handler.suites_data_from_provider.suite_id = None @@ -658,15 +599,12 @@ def test_add_suite_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_sections_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_sections_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 requests_mock.post( create_url(f"add_section/{project_id}"), @@ -676,20 +614,16 @@ def test_add_sections_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - is None + api_request_handler.suites_data_from_provider.testsections[1].section_id is None ), "No resources should be added to DataProvider" @pytest.mark.api_handler - def test_add_section_and_cases_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, @@ -702,7 +636,7 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } mocked_response_for_case_2 = { @@ -710,12 +644,10 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "Passed test.testCase3" + "custom_automation_id": "Passed test.testCase3", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -735,65 +667,55 @@ def test_add_section_and_cases_error( mocked_response_for_case_1["id"], ], "Added case id doesn't match mocked response id" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_results_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_results_error(self, api_request_handler: ApiRequestHandler, requests_mock): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 18319, - 'case_id': 6086, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 18319, + "case_id": 6086, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) resources_added, error, results_added = api_request_handler.add_results(run_id) assert resources_added == [], "Expected empty list of added resources" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert results_added == 0, "Expected 0 resources to be added." @pytest.mark.api_handler - def test_add_results_keyboard_interrupt( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_add_results_keyboard_interrupt(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) - mocker.patch( - "trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt - ) + mocker.patch("trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt) with pytest.raises(KeyboardInterrupt) as exception: api_request_handler.add_results(run_id) @pytest.mark.api_handler - def test_add_suite_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_suite_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = { "description": "..", @@ -818,9 +740,7 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): "description": "Some description", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert error == "", "There should be no error in verification." @@ -828,45 +748,34 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert ( - error - == "Data verification failed. Server added different resource than expected." + error == "Data verification failed. Server added different resource than expected." ), "There should be error in verification." 
@pytest.mark.api_handler - def test_add_case_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_case_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): mocked_response_for_case = { "id": 3, "suite_id": 4, "section_id": 1234, "title": "testCase2", "estimate": "30s", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } requests_mock.post( create_url(f"add_case/{mocked_response_for_case['section_id']}"), json=mocked_response_for_case, ) - del api_request_handler_verify.suites_data_from_provider.testsections[ - 1 - ].testcases[0] + del api_request_handler_verify.suites_data_from_provider.testsections[1].testcases[0] resources_added, error = api_request_handler_verify.add_cases() assert error == "", "There should be no error in verification." mocked_response_for_case["estimate"] = "60s" - api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[ - 1 - ].case_id = None + api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[1].case_id = None resources_added, error = api_request_handler_verify.add_cases() - assert ( - error == FAULT_MAPPING["data_verification_error"] - ), "There should be error in verification." + assert error == FAULT_MAPPING["data_verification_error"], "There should be error in verification." @pytest.mark.api_handler - def test_delete_section( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_section(self, api_request_handler_verify: ApiRequestHandler, requests_mock): sections_id = [{"section_id": 1}] mocked_response_for_case = {"success": 200} @@ -879,9 +788,7 @@ def test_delete_section( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_suite( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_suite(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 mocked_response_for_case = {"success": 200} @@ -894,9 +801,7 @@ def test_delete_suite( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_cases( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_cases(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 cases = [{"case_id": 1}] mocked_response_for_case = {"success": 200} @@ -906,15 +811,11 @@ def test_delete_cases( json=mocked_response_for_case, ) - resources_added, error = api_request_handler_verify.delete_cases( - suite_id, cases - ) + resources_added, error = api_request_handler_verify.delete_cases(suite_id, cases) assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_run( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, requests_mock): run_id = 1 mocked_response_for_case = {"success": 200} @@ -925,3 +826,93 @@ def test_delete_run( resources_added, error = api_request_handler_verify.delete_run(run_id) assert error == "", "There should be no error in verification." 
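The new tests below exercise the attachment error handling added to upload_attachments later in this patch. As a reading aid, here is a condensed sketch of the per-file decision flow those tests cover, assuming the attachment_too_large and attachment_upload_failed keys this patch adds to FAULT_MAPPING (the constants.py hunk is not shown in this excerpt); the standalone helper and its parameters are illustrative only.

    import os

    from trcli.constants import FAULT_MAPPING

    def upload_one_attachment(client, environment, result_id, file_path, case_id, failed_uploads):
        """Sketch of the error handling applied to a single attachment upload."""
        try:
            with open(file_path, "rb") as file:
                response = client.send_post(f"add_attachment_to_result/{result_id}", files={"attachment": file})
            if response.status_code == 200:
                return
            file_name = os.path.basename(file_path)
            if response.status_code == 413:  # file exceeds the instance's upload size limit
                environment.elog(FAULT_MAPPING["attachment_too_large"].format(file_name=file_name, case_id=case_id))
            else:
                environment.elog(
                    FAULT_MAPPING["attachment_upload_failed"].format(
                        file_path=file_name,
                        case_id=case_id,
                        error_message=response.error_message or f"HTTP {response.status_code}",
                    )
                )
            failed_uploads.append(f"{file_name} (case {case_id})")
        except FileNotFoundError:
            environment.elog(f"Attachment file not found: {file_path} (case {case_id})")
            failed_uploads.append(f"{file_path} (case {case_id})")

After all files are processed, the handler logs a single summary warning when failed_uploads is non-empty, so oversized or missing attachments no longer fail silently.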
+ + @pytest.mark.api_handler + def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path): + """Test that 413 errors (file too large) are properly reported.""" + run_id = 1 + + # Mock get_tests endpoint + mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1001, "case_id": 100}], + } + requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) + + # Create a temporary test file + test_file = tmp_path / "large_attachment.jpg" + test_file.write_text("test content") + + # Mock add_attachment_to_result endpoint to return 413 + requests_mock.post( + create_url("add_attachment_to_result/2001"), + status_code=413, + text='\n\n413 Request Entity Too Large\n\n

<h1>Request Entity Too Large</h1>

\n\n', + ) + + # Prepare test data + report_results = [{"case_id": 100, "attachments": [str(test_file)]}] + results = [{"id": 2001, "test_id": 1001}] + + # Call upload_attachments + api_request_handler.upload_attachments(report_results, results, run_id) + + # Verify the request was made (case-insensitive comparison) + assert requests_mock.last_request.url.lower() == create_url("add_attachment_to_result/2001").lower() + + @pytest.mark.api_handler + def test_upload_attachments_success(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path): + """Test that successful attachment uploads work correctly.""" + run_id = 1 + + # Mock get_tests endpoint + mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1001, "case_id": 100}], + } + requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) + + # Create a temporary test file + test_file = tmp_path / "test_attachment.jpg" + test_file.write_text("test content") + + # Mock add_attachment_to_result endpoint to return success + requests_mock.post(create_url("add_attachment_to_result/2001"), status_code=200, json={"attachment_id": 5001}) + + # Prepare test data + report_results = [{"case_id": 100, "attachments": [str(test_file)]}] + results = [{"id": 2001, "test_id": 1001}] + + # Call upload_attachments + api_request_handler.upload_attachments(report_results, results, run_id) + + # Verify the request was made (case-insensitive comparison) + assert requests_mock.last_request.url.lower() == create_url("add_attachment_to_result/2001").lower() + + @pytest.mark.api_handler + def test_upload_attachments_file_not_found(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test that missing attachment files are properly reported.""" + run_id = 1 + + # Mock get_tests endpoint + mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1001, "case_id": 100}], + } + requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) + + # Prepare test data with non-existent file + report_results = [{"case_id": 100, "attachments": ["/path/to/nonexistent/file.jpg"]}] + results = [{"id": 2001, "test_id": 1001}] + + # Call upload_attachments - should not raise exception + api_request_handler.upload_attachments(report_results, results, run_id) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 3dcd196..7c627a0 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,4 +1,4 @@ -import html, json +import html, json, os from concurrent.futures import ThreadPoolExecutor, as_completed from beartype.typing import List, Union, Tuple, Dict @@ -7,7 +7,9 @@ from trcli.cli import Environment from trcli.constants import ( ProjectErrors, - FAULT_MAPPING, OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID, + FAULT_MAPPING, + OLD_SYSTEM_NAME_AUTOMATION_ID, + UPDATED_SYSTEM_NAME_AUTOMATION_ID, ) from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData @@ -33,7 +35,7 @@ def __init__( environment.case_fields, environment.run_description, environment.result_fields, - environment.section_id + environment.section_id, ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) @@ -48,11 +50,11 @@ def check_automation_id_field(self, project_id: int) 
-> Union[str, None]: if not response.error_message: fields: List = response.response_text automation_id_field = next( - filter( + filter( lambda x: x["system_name"] in [OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID], - fields + fields, ), - None + None, ) if automation_id_field: if automation_id_field["is_active"] is False: @@ -79,11 +81,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project """ projects_data, error = self.__get_all_projects() if not error: - available_projects = [ - project - for project in projects_data - if project["name"] == project_name - ] + available_projects = [project for project in projects_data if project["name"] == project_name] if len(available_projects) == 1: return ProjectData( @@ -94,9 +92,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project elif len(available_projects) > 1: if project_id in [project["id"] for project in available_projects]: project_index = [ - index - for index, project in enumerate(available_projects) - if project["id"] == project_id + index for index, project in enumerate(available_projects) if project["id"] == project_id ][0] return ProjectData( project_id=int(available_projects[project_index]["id"]), @@ -131,11 +127,7 @@ def check_suite_id(self, project_id: int) -> Tuple[bool, str]: suite_id = self.suites_data_from_provider.suite_id suites_data, error = self.__get_all_suites(project_id) if not error: - available_suites = [ - suite - for suite in suites_data - if suite["id"] == suite_id - ] + available_suites = [suite for suite in suites_data if suite["id"] == suite_id] return ( (True, "") if len(available_suites) > 0 @@ -207,9 +199,7 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_suite/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -224,9 +214,11 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(suite_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(suite_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: @@ -246,20 +238,24 @@ def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: if self.environment.section_id: if section.section_id in sections_by_id.keys(): section_json = sections_by_id[section.section_id] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True if section.name in sections_by_name.keys(): section_json = sections_by_name[section.name] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: 
missing_test_sections = True self.data_provider.update_data(section_data=section_data) @@ -281,9 +277,7 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_section/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -298,9 +292,11 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(section_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(section_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: @@ -327,12 +323,14 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: aut_id = test_case.custom_automation_id if aut_id in test_cases_by_aut_id.keys(): case = test_cases_by_aut_id[aut_id] - test_case_data.append({ - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id - }) + test_case_data.append( + { + "case_id": case["id"], + "section_id": case["section_id"], + "title": case["title"], + OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, + } + ) else: missing_cases_number += 1 self.data_provider.update_data(case_data=test_case_data) @@ -386,25 +384,25 @@ def add_cases(self) -> Tuple[List[dict], str]: { "case_id": response.response_text["id"], "section_id": response.response_text["section_id"], - "title": response.response_text["title"] + "title": response.response_text["title"], } for response in responses ] return returned_resources, error_message def add_run( - self, - project_id: int, - run_name: str, - milestone_id: int = None, - start_date: str = None, - end_date: str = None, - plan_id: int = None, - config_ids: List[int] = None, - assigned_to_id: int = None, - include_all: bool = False, - refs: str = None, - case_ids: List[int] = None, + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, ) -> Tuple[int, str]: """ Creates a new test run. 
@@ -432,7 +430,7 @@ def add_run( "name": add_run_data["name"], "suite_id": add_run_data["suite_id"], "config_ids": config_ids, - "runs": [add_run_data] + "runs": [add_run_data], } else: entry_data = add_run_data @@ -440,8 +438,16 @@ def add_run( run_id = response.response_text["runs"][0]["id"] return run_id, response.error_message - def update_run(self, run_id: int, run_name: str, start_date: str = None, - end_date: str = None, milestone_id: int = None, refs: str = None, refs_action: str = 'add') -> Tuple[dict, str]: + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: """ Updates an existing run :run_id: run id @@ -453,12 +459,13 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, run_response.error_message - + existing_description = run_response.response_text.get("description", "") existing_refs = run_response.response_text.get("refs", "") - add_run_data = self.data_provider.add_run(run_name, start_date=start_date, - end_date=end_date, milestone_id=milestone_id) + add_run_data = self.data_provider.add_run( + run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id + ) add_run_data["description"] = existing_description # Retain the current description # Handle references based on action @@ -473,7 +480,7 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, report_case_ids = add_run_data["case_ids"] joint_case_ids = list(set(report_case_ids + run_case_ids)) add_run_data["case_ids"] = joint_case_ids - + plan_id = run_response.response_text["plan_id"] config_ids = run_response.response_text["config_ids"] if not plan_id: @@ -505,29 +512,29 @@ def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> """ if not existing_refs: existing_refs = "" - - if action == 'update': + + if action == "update": # Replace all references with new ones return new_refs - elif action == 'delete': + elif action == "delete": if not new_refs: # Delete all references return "" else: # Delete specific references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + refs_to_delete = [ref.strip() for ref in new_refs.split(",") if ref.strip()] updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ','.join(updated_list) + return ",".join(updated_list) else: # action == 'add' (default) # Add new references to existing ones if not existing_refs: return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + new_list = [ref.strip() for ref in new_refs.split(",") if ref.strip()] # Avoid duplicates combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ','.join(combined_list) + return ",".join(combined_list) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: """ @@ -540,11 +547,11 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic run_response = 
self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, [], [], run_response.error_message - + existing_refs = run_response.response_text.get("refs", "") or "" - + # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] # Deduplicate input references new_list = [] seen = set() @@ -553,28 +560,33 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic if ref_clean and ref_clean not in seen: new_list.append(ref_clean) seen.add(ref_clean) - + # Determine which references are new vs duplicates added_refs = [ref for ref in new_list if ref not in existing_list] skipped_refs = [ref for ref in new_list if ref in existing_list] - + # If no new references to add, return current state if not added_refs: return run_response.response_text, added_refs, skipped_refs, None - + # Combine references combined_list = existing_list + added_refs - combined_refs = ','.join(combined_list) - + combined_refs = ",".join(combined_list) + if len(combined_refs) > 250: - return None, [], [], f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit" - + return ( + None, + [], + [], + f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit", + ) + update_data = {"refs": combined_refs} - + # Determine the correct API endpoint based on plan membership plan_id = run_response.response_text.get("plan_id") config_ids = run_response.response_text.get("config_ids") - + if not plan_id: # Standalone run update_response = self.client.send_post(f"update_run/{run_id}", update_data) @@ -586,7 +598,7 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic plan_response = self.client.send_get(f"get_plan/{plan_id}") if plan_response.error_message: return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - + # Find the entry_id for this run entry_id = None for entry in plan_response.response_text.get("entries", []): @@ -596,19 +608,21 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic break if entry_id: break - + if not entry_id: return None, [], [], f"Could not find plan entry for run {run_id}" - + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - + if update_response.error_message: return None, [], [], update_response.error_message - + updated_run_response = self.client.send_get(f"get_run/{run_id}") return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message - def update_existing_case_references(self, case_id: int, junit_refs: str, strategy: str = "append") -> Tuple[bool, str, List[str], List[str]]: + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: """ Update existing case references with values from JUnit properties. 
:param case_id: ID of the test case @@ -618,64 +632,72 @@ def update_existing_case_references(self, case_id: int, junit_refs: str, strateg """ if not junit_refs or not junit_refs.strip(): return True, None, [], [] # No references to process - + # Parse and validate JUnit references, deduplicating input junit_ref_list = [] seen = set() - for ref in junit_refs.split(','): + for ref in junit_refs.split(","): ref_clean = ref.strip() if ref_clean and ref_clean not in seen: junit_ref_list.append(ref_clean) seen.add(ref_clean) - + if not junit_ref_list: return False, "No valid references found in JUnit property", [], [] - + # Get current case data case_response = self.client.send_get(f"get_case/{case_id}") if case_response.error_message: return False, case_response.error_message, [], [] - - existing_refs = case_response.response_text.get('refs', '') or '' - + + existing_refs = case_response.response_text.get("refs", "") or "" + if strategy == "replace": # Replace strategy: use JUnit refs as-is - new_refs = ','.join(junit_ref_list) + new_refs = ",".join(junit_ref_list) added_refs = junit_ref_list skipped_refs = [] else: # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] - + existing_ref_list = ( + [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] + ) + # Determine which references are new vs duplicates added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - + # If no new references to add, return current state if not added_refs: return True, None, added_refs, skipped_refs - + # Combine references combined_list = existing_ref_list + added_refs - new_refs = ','.join(combined_list) - + new_refs = ",".join(combined_list) + # Validate 2000 character limit for test case references if len(new_refs) > 2000: - return False, f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", [], [] - + return ( + False, + f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", + [], + [], + ) + # Update the case update_data = {"refs": new_refs} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.error_message: return False, update_response.error_message, [], [] - + return True, None, added_refs, skipped_refs def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """ Getting test result id and upload attachments for it. 
""" + """Getting test result id and upload attachments for it.""" tests_in_run, error = self.__get_all_tests_in_run(run_id) if not error: + failed_uploads = [] for report_result in report_results: case_id = report_result["case_id"] test_id = next((test["id"] for test in tests_in_run if test["case_id"] == case_id), None) @@ -683,9 +705,41 @@ def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id for file_path in report_result.get("attachments"): try: with open(file_path, "rb") as file: - self.client.send_post(f"add_attachment_to_result/{result_id}", files={"attachment": file}) + response = self.client.send_post( + f"add_attachment_to_result/{result_id}", files={"attachment": file} + ) + + # Check if upload was successful + if response.status_code != 200: + file_name = os.path.basename(file_path) + + # Handle 413 Request Entity Too Large specifically + if response.status_code == 413: + error_msg = FAULT_MAPPING["attachment_too_large"].format( + file_name=file_name, case_id=case_id + ) + self.environment.elog(error_msg) + failed_uploads.append(f"{file_name} (case {case_id})") + else: + # Handle other HTTP errors + error_msg = FAULT_MAPPING["attachment_upload_failed"].format( + file_path=file_name, + case_id=case_id, + error_message=response.error_message or f"HTTP {response.status_code}", + ) + self.environment.elog(error_msg) + failed_uploads.append(f"{file_name} (case {case_id})") + except FileNotFoundError: + self.environment.elog(f"Attachment file not found: {file_path} (case {case_id})") + failed_uploads.append(f"{file_path} (case {case_id})") except Exception as ex: - self.environment.elog(f"Error uploading attachment for case {case_id}: {ex}") + file_name = os.path.basename(file_path) if os.path.exists(file_path) else file_path + self.environment.elog(f"Error uploading attachment '{file_name}' for case {case_id}: {ex}") + failed_uploads.append(f"{file_name} (case {case_id})") + + # Provide a summary if there were failed uploads + if failed_uploads: + self.environment.log(f"\nWarning: {len(failed_uploads)} attachment(s) failed to upload.") else: self.environment.elog(f"Unable to upload attachments due to API request error: {error}") @@ -698,26 +752,18 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: responses = [] error_message = "" # Get pre-validated user IDs if available - user_ids = getattr(self.environment, '_validated_user_ids', []) - - add_results_data_chunks = self.data_provider.add_results_for_cases( - self.environment.batch_size, user_ids - ) + user_ids = getattr(self.environment, "_validated_user_ids", []) + + add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) # Get assigned count from data provider - assigned_count = getattr(self.data_provider, '_assigned_count', 0) - - results_amount = sum( - [len(results["results"]) for results in add_results_data_chunks] - ) + assigned_count = getattr(self.data_provider, "_assigned_count", 0) - with self.environment.get_progress_bar( - results_amount=results_amount, prefix="Adding results" - ) as progress_bar: + results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) + + with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: futures = { - executor.submit( - self.client.send_post, f"add_results_for_cases/{run_id}", body - ): body + executor.submit(self.client.send_post, 
f"add_results_for_cases/{run_id}", body): body for body in add_results_data_chunks } responses, error_message = self.handle_futures( @@ -730,11 +776,7 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: # Iterate through futures to get all responses from done tasks (not cancelled) responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) responses = [response.response_text for response in responses] - results = [ - result - for results_list in responses - for result in results_list - ] + results = [result for results_list in responses for result in results_list] report_results_w_attachments = [] for results_data_chunk in add_results_data_chunks: for test_result in results_data_chunk["results"]: @@ -744,22 +786,22 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: attachments_count = 0 for result in report_results_w_attachments: attachments_count += len(result["attachments"]) - self.environment.log(f"Uploading {attachments_count} attachments " - f"for {len(report_results_w_attachments)} test results.") + self.environment.log( + f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." + ) self.upload_attachments(report_results_w_attachments, results, run_id) else: self.environment.log(f"No attachments found to upload.") - + # Log assignment results if assignment was performed if user_ids: - total_failed = getattr(self.data_provider, '_total_failed_count', assigned_count) + total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) if assigned_count > 0: self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") else: self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + return responses, error_message, progress_bar.n def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -776,9 +818,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st if action_string == "add_case": arguments = arguments.to_dict() arguments.pop("case_id") - if not self.response_verifier.verify_returned_data( - arguments, response.response_text - ): + if not self.response_verifier.verify_returned_data(arguments, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] self.__cancel_running_futures(futures, action_string) @@ -786,9 +826,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st progress_bar.update(1) else: error_message = response.error_message - self.environment.log( - f"\nError during {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nError during {action_string}. Trying to cancel scheduled tasks.") self.__cancel_running_futures(futures, action_string) break else: @@ -826,9 +864,7 @@ def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: responses = [] error_message = "" for section in added_sections: - response = self.client.send_post( - f"delete_section/{section['section_id']}", payload={} - ) + response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) if not response.error_message: responses.append(response.response_text) else: @@ -868,45 +904,52 @@ def retrieve_results_after_cancelling(futures) -> list: def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ Validates a user email and returns the user ID if valid. 
- + :param email: User email to validate :returns: Tuple with user ID (or None if not found) and error message """ if not email or not email.strip(): return None, "Email cannot be empty" - + email = email.strip() # Use proper URL encoding for the query parameter import urllib.parse + encoded_email = urllib.parse.quote_plus(email) response = self.client.send_get(f"get_user_by_email&email={encoded_email}") - + if response.error_message: # Map TestRail's email validation error to our expected format if "Field :email is not a valid email address" in response.error_message: return None, f"User not found: {email}" return None, response.error_message - + if response.status_code == 200: try: user_data = response.response_text - if isinstance(user_data, dict) and 'id' in user_data: - return user_data['id'], "" + if isinstance(user_data, dict) and "id" in user_data: + return user_data["id"], "" else: return None, f"Invalid response format for user: {email}" except (KeyError, TypeError): return None, f"Invalid response format for user: {email}" elif response.status_code == 400: # Check if the response contains the email validation error - if (hasattr(response, 'response_text') and response.response_text and - isinstance(response.response_text, dict) and - "Field :email is not a valid email address" in str(response.response_text.get('error', ''))): + if ( + hasattr(response, "response_text") + and response.response_text + and isinstance(response.response_text, dict) + and "Field :email is not a valid email address" in str(response.response_text.get("error", "")) + ): return None, f"User not found: {email}" return None, f"User not found: {email}" else: # For other status codes, check if it's the email validation error - if (hasattr(response, 'response_text') and response.response_text and - "Field :email is not a valid email address" in str(response.response_text)): + if ( + hasattr(response, "response_text") + and response.response_text + and "Field :email is not a valid email address" in str(response.response_text) + ): return None, f"User not found: {email}" return None, f"API error (status {response.status_code}) when validating user: {email}" @@ -925,9 +968,7 @@ def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: return response def __cancel_running_futures(self, futures, action_string): - self.environment.log( - f"\nAborting: {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") for future in futures: future.cancel() @@ -936,33 +977,33 @@ def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], s Get all cases from all pages """ if suite_id is None: - return self.__get_all_entities('cases', f"get_cases/{project_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}") else: - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}") def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ Get all sections from all pages """ - return self.__get_all_entities('sections', f"get_sections/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}") def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ Get all tests from all pages """ - return self.__get_all_entities('tests', f"get_tests/{run_id}") + return self.__get_all_entities("tests", f"get_tests/{run_id}") def __get_all_projects(self) -> Tuple[List[dict], str]: """ Get all projects from all pages """ - return self.__get_all_entities('projects', f"get_projects") + return self.__get_all_entities("projects", f"get_projects") def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ Get all suites from all pages """ - return self.__get_all_entities('suites', f"get_suites/{project_id}") + return self.__get_all_entities("suites", f"get_suites/{project_id}") def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ @@ -979,9 +1020,7 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ return response.response_text, response.error_message # Check if response is a string (JSON parse failed) if isinstance(response.response_text, str): - error_msg = FAULT_MAPPING["invalid_api_response"].format( - error_details=response.response_text[:200] - ) + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) return [], error_msg # Endpoints with pagination entities = entities + response.response_text[entity] @@ -1002,7 +1041,7 @@ def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: :returns: Tuple with created label data and error string """ # Use multipart/form-data like the working CURL command - files = {'title': (None, title)} + files = {"title": (None, title)} response = self.client.send_post(f"add_label/{project_id}", payload=None, files=files) return response.response_text, response.error_message @@ -1016,8 +1055,8 @@ def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict """ # Use multipart/form-data like add_label files = { - 'project_id': (None, str(project_id)), - 'title': (None, title) # Field name is 'title' (no colon) for form data + "project_id": (None, str(project_id)), + "title": (None, title), # Field name is 'title' (no colon) for form data } response = self.client.send_post(f"update_label/{label_id}", payload=None, files=files) return response.response_text, response.error_message @@ -1044,11 +1083,11 @@ def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tupl params.append(f"offset={offset}") if limit != 250: params.append(f"limit={limit}") - + url = f"get_labels/{project_id}" if params: url += "&" + "&".join(params) - + response = self.client.send_get(url) return response.response_text, response.error_message 
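For readers following the label endpoints above, here is a minimal usage sketch (illustration only, not part of the patch) of how add_label, get_labels and update_label might be driven from an ApiRequestHandler instance. The handler variable, project ID and titles are assumed values, and the created-label payload is unwrapped the same way add_labels_to_cases does further down (either under a "label" key or at the top level):

    # Sketch only: "handler" is assumed to be an ApiRequestHandler configured as in the test fixtures.
    label_data, error = handler.add_label(project_id=3, title="smoke")
    if not error:
        # TestRail may wrap the created label under a "label" key, so unwrap defensively.
        label_info = label_data.get("label", label_data)
        labels_page, error = handler.get_labels(project_id=3, offset=0, limit=50)
        handler.update_label(label_id=label_info.get("id"), project_id=3, title="regression")
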
@@ -1070,16 +1109,19 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: """ # Send as form data with JSON array format import json + label_ids_json = json.dumps(label_ids) files = {"label_ids": (None, label_ids_json)} response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message - def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: + def add_labels_to_cases( + self, case_ids: List[int], title: str, project_id: int, suite_id: int = None + ) -> Tuple[dict, str]: """ Add a label to multiple test cases - + :param case_ids: List of test case IDs :param title: Label title (max 20 characters) :param project_id: Project ID for validation @@ -1087,122 +1129,113 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_cases': [], - 'failed_cases': [], - 'max_labels_reached': [], - 'case_not_found': [] - } - + results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} + # Check if project is multi-suite by getting all cases without suite_id all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) if error_message: return results, error_message - + # Check if project has multiple suites suite_ids = set() for case in all_cases_no_suite: - if 'suite_id' in case and case['suite_id']: - suite_ids.add(case['suite_id']) - + if "suite_id" in case and case["suite_id"]: + suite_ids.add(case["suite_id"]) + # If project has multiple suites and no suite_id provided, require it if len(suite_ids) > 1 and suite_id is None: return results, "This project is multisuite, suite id is required" - + # Get all cases to validate that the provided case IDs exist all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return results, error_message - + # Create a set of existing case IDs for quick lookup - existing_case_ids = {case['id'] for case in all_cases} - + existing_case_ids = {case["id"] for case in all_cases} + # Validate case IDs and separate valid from invalid ones invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - + # Record invalid case IDs for case_id in invalid_case_ids: - results['case_not_found'].append(case_id) - + results["case_not_found"].append(case_id) + # If no valid case IDs, return early if not valid_case_ids: return results, "" - + # Check if label exists or create it existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + # Collect case data and validate constraints cases_to_update = [] for 
case_id in valid_case_ids: # Get current case to check existing labels case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: - results['failed_cases'].append({ - 'case_id': case_id, - 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" - }) + results["failed_cases"].append( + {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} + ) continue - + case_data = case_response.response_text - current_labels = case_data.get('labels', []) - + current_labels = case_data.get("labels", []) + # Check if label already exists on this case - if any(label.get('id') == label_id for label in current_labels): - results['successful_cases'].append({ - 'case_id': case_id, - 'message': f"Label '{title}' already exists on case {case_id}" - }) + if any(label.get("id") == label_id for label in current_labels): + results["successful_cases"].append( + {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} + ) continue - + # Check maximum labels limit (10) if len(current_labels) >= 10: - results['max_labels_reached'].append(case_id) + results["max_labels_reached"].append(case_id) continue - + # Prepare case for update - existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({ - 'case_id': case_id, - 'labels': updated_label_ids - }) - + cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) + # Update cases using appropriate endpoint if len(cases_to_update) == 1: # Single case: use update_case/{case_id} case_info = cases_to_update[0] - case_update_data = {'labels': case_info['labels']} - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) elif len(cases_to_update) > 1: # Multiple cases: use update_cases/{suite_id} # Need to determine suite_id from the cases @@ -1210,62 +1243,72 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, if not case_suite_id: # Get suite_id from the first case if not provided first_case = all_cases[0] if all_cases else None - case_suite_id = first_case.get('suite_id') if first_case else None - + case_suite_id = first_case.get("suite_id") if first_case else None + if not case_suite_id: # Fall back to individual updates if no suite_id available for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if 
update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) else: # Batch update using update_cases/{suite_id} batch_update_data = { - 'case_ids': [case_info['case_id'] for case_info in cases_to_update], - 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases + "case_ids": [case_info["case_id"] for case_info in cases_to_update], + "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases } - + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - + if batch_response.status_code == 200: for case_info in cases_to_update: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: # If batch update fails, fall back to individual updates for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) - + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + return results, "" - def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + def get_cases_by_label( + self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None + ) -> Tuple[List[dict], str]: """ Get test cases filtered by label ID or title - + :param project_id: Project ID :param suite_id: Suite ID (optional) :param label_ids: List of label IDs to filter by @@ -1276,234 +1319,228 @@ def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: L all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return [], error_message - + # If filtering by title, first get the label ID target_label_ids = label_ids or [] if label_title and not target_label_ids: labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - 
target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Filter cases that have any of the target labels matching_cases = [] for case in all_cases: - case_labels = case.get('labels', []) - case_label_ids = [label.get('id') for label in case_labels] - + case_labels = case.get("labels", []) + case_label_ids = [label.get("id") for label in case_labels] + # Check if any of the target label IDs are present in this case if any(label_id in case_label_ids for label_id in target_label_ids): matching_cases.append(case) - + return matching_cases, "" - def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: """ Add labels to multiple tests - + :param test_ids: List of test IDs :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) :param project_id: Project ID for validation :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_tests': [], - 'failed_tests': [], - 'max_labels_reached': [], - 'test_not_found': [] - } - + results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + # Normalize titles to a list if isinstance(titles, str): title_list = [titles] else: title_list = titles - + # At this point, title_list should already be validated by the CLI # Just ensure we have clean titles title_list = [title.strip() for title in title_list if title.strip()] - + if not title_list: return {}, "No valid labels provided" - + # Validate test IDs by getting run information for each test valid_test_ids = [] for test_id in test_ids: # Get test information to validate it exists test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) continue - + test_data = test_response.response_text # Validate that the test belongs to the correct project - run_id = test_data.get('run_id') + run_id = test_data.get("run_id") if run_id: run_response = self.client.send_get(f"get_run/{run_id}") if run_response.status_code == 200: run_data = run_response.response_text - if run_data.get('project_id') == project_id: + if run_data.get("project_id") == project_id: valid_test_ids.append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) - + results["test_not_found"].append(test_id) + # If no valid test IDs, return early if not valid_test_ids: return results, "" - + # Check if labels exist or create them existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Process each title to get/create label IDs label_ids = [] label_id_to_title = {} # Map label IDs to their titles for title in title_list: # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if 
label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + if label_id: label_ids.append(label_id) label_id_to_title[label_id] = title - + # Collect test data and validate constraints tests_to_update = [] for test_id in valid_test_ids: # Get current test to check existing labels test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['failed_tests'].append({ - 'test_id': test_id, - 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" - }) + results["failed_tests"].append( + {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} + ) continue - + test_data = test_response.response_text - current_labels = test_data.get('labels', []) - current_label_ids = [label.get('id') for label in current_labels if label.get('id')] - + current_labels = test_data.get("labels", []) + current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + new_label_ids = [] already_exists_titles = [] - + for label_id in label_ids: if label_id not in current_label_ids: new_label_ids.append(label_id) else: if label_id in label_id_to_title: already_exists_titles.append(label_id_to_title[label_id]) - + if not new_label_ids: - results['successful_tests'].append({ - 'test_id': test_id, - 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" - }) + results["successful_tests"].append( + { + "test_id": test_id, + "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", + } + ) continue - + # Check maximum labels limit (10) if len(current_label_ids) + len(new_label_ids) > 10: - results['max_labels_reached'].append(test_id) + results["max_labels_reached"].append(test_id) continue - + # Prepare test for update updated_label_ids = current_label_ids + new_label_ids - + new_label_titles = [] for label_id in new_label_ids: if label_id in label_id_to_title: new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append({ - 'test_id': test_id, - 'labels': updated_label_ids, - 'new_labels': new_label_ids, - 'new_label_titles': new_label_titles - }) - + + tests_to_update.append( + { + "test_id": test_id, + "labels": updated_label_ids, + "new_labels": new_label_ids, + "new_label_titles": new_label_titles, + } + ) + # Update tests using appropriate endpoint if len(tests_to_update) == 1: # Single test: use update_test/{test_id} test_info = tests_to_update[0] - test_update_data = {'labels': test_info['labels']} - + test_update_data = {"labels": test_info["labels"]} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added 
to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) else: # Multiple tests: use individual updates to ensure each test gets its specific labels for test_info in tests_to_update: - test_update_data = {'labels': test_info['labels']} + test_update_data = {"labels": test_info["labels"]} update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) - + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + return results, "" - def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: """ Get tests filtered by label ID or title from specific runs - + :param project_id: Project ID :param label_ids: List of label IDs to filter by :param label_title: Label title to filter by @@ -1516,14 +1553,14 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Get runs for the project (either all runs or specific run IDs) if run_ids: # Use specific run IDs - validate they exist by getting run details @@ -1539,67 +1576,65 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label runs_response = self.client.send_get(f"get_runs/{project_id}") if runs_response.status_code != 200: return [], runs_response.error_message - + runs_data = runs_response.response_text - runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data - + runs = runs_data.get("runs", []) if isinstance(runs_data, dict) else runs_data + # Collect all tests from all runs matching_tests = [] for run in runs: - run_id = 
run.get('id') + run_id = run.get("id") if not run_id: continue - + # Get tests for this run tests_response = self.client.send_get(f"get_tests/{run_id}") if tests_response.status_code != 200: continue # Skip this run if we can't get tests - + tests_data = tests_response.response_text - tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data - + tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + # Filter tests that have any of the target labels for test in tests: - test_labels = test.get('labels', []) - test_label_ids = [label.get('id') for label in test_labels] - + test_labels = test.get("labels", []) + test_label_ids = [label.get("id") for label in test_labels] + # Check if any of the target label IDs are present in this test if any(label_id in test_label_ids for label_id in target_label_ids): matching_tests.append(test) - + return matching_tests, "" def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: """ Get labels for specific tests - + :param test_ids: List of test IDs to get labels for :returns: Tuple with list of test label information and error string """ results = [] - + for test_id in test_ids: # Get test information test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results.append({ - 'test_id': test_id, - 'error': f"Test {test_id} not found or inaccessible", - 'labels': [] - }) + results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) continue - + test_data = test_response.response_text - test_labels = test_data.get('labels', []) - - results.append({ - 'test_id': test_id, - 'title': test_data.get('title', 'Unknown'), - 'status_id': test_data.get('status_id'), - 'labels': test_labels, - 'error': None - }) - + test_labels = test_data.get("labels", []) + + results.append( + { + "test_id": test_id, + "title": test_data.get("title", "Unknown"), + "status_id": test_data.get("status_id"), + "labels": test_labels, + "error": None, + } + ) + return results, "" # Test case reference management methods @@ -1614,15 +1649,15 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + # Parse existing references existing_ref_list = [] if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references while preserving order deduplicated_input = [] seen = set() @@ -1631,24 +1666,24 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool if ref_clean and ref_clean not in seen: deduplicated_input.append(ref_clean) seen.add(ref_clean) - + # Add new references (avoid duplicates with existing) all_refs = existing_ref_list.copy() for ref in deduplicated_input: if ref not in all_refs: all_refs.append(ref) - + # Join all references - new_refs_string = ','.join(all_refs) - + new_refs_string = ",".join(all_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # 
Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1669,18 +1704,18 @@ def update_case_references(self, case_id: int, references: List[str]) -> Tuple[b if ref_clean and ref_clean not in seen: deduplicated_refs.append(ref_clean) seen.add(ref_clean) - + # Join references - new_refs_string = ','.join(deduplicated_refs) - + new_refs_string = ",".join(deduplicated_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1695,36 +1730,36 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = """ if specific_references is None: # Delete all references by setting refs to empty string - update_data = {'refs': ''} + update_data = {"refs": ""} else: # First get the current test case to retrieve existing references case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + if not existing_refs: # No references to delete return True, "" - + # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references for efficient processing refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) - + # Remove specific references remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] - + # Join remaining references - new_refs_string = ','.join(remaining_refs) - update_data = {'refs': new_refs_string} - + new_refs_string = ",".join(remaining_refs) + update_data = {"refs": new_refs_string} + # Update the test case update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: diff --git a/trcli/constants.py b/trcli/constants.py index 0dc9fec..6106be7 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -5,9 +5,7 @@ missing_file="Please provide a valid path to your results file with the -f argument.", ) -PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict( - missing_title="Please give your Test Run a title using the --title argument." -) +PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict(missing_title="Please give your Test Run a title using the --title argument.") ADD_RUN_FAULT_MAPPING = dict( missing_title="Please give your Test Run a title using the --title argument.", @@ -58,18 +56,21 @@ f" - System Name: automation_id\n" f" - Type: Text (or String)\n" f" - Is Active: True", - proxy_connection_error= "Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.", - proxy_authentication_failed= "Proxy authentication failed for proxy. 
Please verify the username and password.", - proxy_timeout= "The connection to the proxy server timed out. Please try again later or check the proxy server's availability.", - proxy_bypass_error= "Failed to bypass the proxy for host. Please check the settings.", - proxy_invalid_configuration= "The provided proxy configuration is invalid. Please check the proxy URL and format.", - ssl_error_on_proxy= "SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.", - no_proxy_match_error= "The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.", - no_suites_found= "The project {project_id} does not have any suites.", - invalid_json_response= "Received invalid response from TestRail server (HTTP {status_code}). " + proxy_connection_error="Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.", + proxy_authentication_failed="Proxy authentication failed for proxy. Please verify the username and password.", + proxy_timeout="The connection to the proxy server timed out. Please try again later or check the proxy server's availability.", + proxy_bypass_error="Failed to bypass the proxy for host. Please check the settings.", + proxy_invalid_configuration="The provided proxy configuration is invalid. Please check the proxy URL and format.", + ssl_error_on_proxy="SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.", + no_proxy_match_error="The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.", + no_suites_found="The project {project_id} does not have any suites.", + invalid_json_response="Received invalid response from TestRail server (HTTP {status_code}). " "Please verify your TestRail host URL (-h) is correct and points to a valid TestRail instance. " "Response preview: {response_preview}", - invalid_api_response= "Invalid response from TestRail API: {error_details}" + invalid_api_response="Invalid response from TestRail API: {error_details}", + attachment_upload_failed="Failed to upload attachment '{file_path}' for case {case_id}: {error_message}", + attachment_too_large="Failed to upload attachment '{file_name}' for case {case_id}: " + "File size exceeds TestRail's maximum limit of 250 MB. 
Please reduce the file size or exclude this attachment.", ) COMMAND_FAULT_MAPPING = dict( @@ -128,6 +129,7 @@ class RevertMessages: run_deleted = "Deleted created run" run_not_deleted = "Unable to delete created run: {error}" + OLD_SYSTEM_NAME_AUTOMATION_ID = "custom_automation_id" # field name mismatch on testrail side (can not reproduce in cloud version TestRail v9.1.2) -UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id" \ No newline at end of file +UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id" From e567d6d30d888a88060c7db607dc571810a01994 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 27 Nov 2025 12:59:31 +0800 Subject: [PATCH 07/10] TRCLI-207 fixed deletion issue when updating test runs, also added unit tests --- tests/test_api_request_handler.py | 595 ++++++++++++-------- trcli/api/api_request_handler.py | 895 +++++++++++++++--------------- 2 files changed, 820 insertions(+), 670 deletions(-) diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py index 4f17c37..04f7bc7 100644 --- a/tests/test_api_request_handler.py +++ b/tests/test_api_request_handler.py @@ -23,9 +23,7 @@ def _make_handler(verify=False, custom_json=None): environment.batch_size = 10 environment.case_matcher = MatchersParser.AUTO if custom_json is None: - json_path = ( - Path(__file__).parent / "test_data/json/api_request_handler.json" - ) + json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" else: json_path = custom_json file_json = open(json_path) @@ -49,17 +47,13 @@ def api_request_handler_verify(handler_maker): @pytest.fixture(scope="function") def api_request_handler_update_case_json(handler_maker): - json_path = ( - Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" - ) + json_path = Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" yield handler_maker(custom_json=json_path, verify=False) class TestApiRequestHandler: @pytest.mark.api_handler - def test_return_project( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = { "offset": 0, "limit": 250, @@ -72,7 +66,7 @@ def test_return_project( {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, {"id": 3, "name": "DataHub", "suite_mode": 1}, - ] + ], } requests_mock.get(create_url("get_projects"), json=mocked_response) assert api_request_handler.get_project_data("Test Project") == ProjectData( @@ -107,9 +101,7 @@ def test_return_project( ), "Get project should return proper project data object" @pytest.mark.api_handler - def test_return_project_legacy_response( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_legacy_response(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = [ {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, @@ -131,15 +123,15 @@ def test_return_project_legacy_response_with_buggy_authentication_prefix( {"id": 3, "name": "DataHub", "suite_mode": 1}, ] - requests_mock.get(create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n"+json.dumps(mocked_response)) + requests_mock.get( + create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n" + json.dumps(mocked_response) + ) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=2, suite_mode=1, error_message="" ), 
"Get project should return proper project data object" @pytest.mark.api_handler - def test_check_suite_exists( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_check_suite_exists(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, @@ -155,7 +147,7 @@ def test_check_suite_exists( False, FAULT_MAPPING["missing_suite"].format(suite_id=6), ), "Given suite id should NOT exist in mocked response." - + @pytest.mark.api_handler def test_check_suite_exists_with_pagination(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 @@ -167,7 +159,7 @@ def test_check_suite_exists_with_pagination(self, api_request_handler: ApiReques "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) @@ -207,16 +199,13 @@ def test_add_suite(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in add_suite" assert ( - api_request_handler.suites_data_from_provider.suite_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.suite_id == mocked_response["id"] ), "Added suite id in DataProvider doesn't match mocked response id." @pytest.mark.api_handler - def test_check_missing_sections_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -225,25 +214,19 @@ def test_check_missing_sections_true( "suite_id": 4, "name": "Skipped test", } - ] + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) - update_data_mock.assert_called_with( - section_data=[{'section_id': 0, 'suite_id': 4, 'name': 'Skipped test'}] - ) + update_data_mock.assert_called_with(section_data=[{"section_id": 0, "suite_id": 4, "name": "Skipped test"}]) assert missing, "There should be missing section" @pytest.mark.api_handler - def test_check_missing_sections_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -256,19 +239,17 @@ def test_check_missing_sections_false( "id": 2, "suite_id": 4, "name": "Passed test", - } - ] + }, + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) 
update_data_mock.assert_called_with( section_data=[ - {'name': 'Skipped test', 'section_id': 1, 'suite_id': 4}, - {'name': 'Passed test', 'section_id': 2, 'suite_id': 4} + {"name": "Skipped test", "section_id": 1, "suite_id": 4}, + {"name": "Passed test", "section_id": 2, "suite_id": 4}, ] ) assert not missing, "There should be no missing section" @@ -282,9 +263,7 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc "name": "Passed test", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_sections(project_id) assert ( @@ -296,20 +275,17 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc assert error == "", "Error occurred in add_section" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.testsections[1].section_id == mocked_response["id"] ), "Added section id in DataProvider doesn't match mocked response id." @pytest.mark.api_handler - def test_add_section_and_cases( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, "suite_id": 4, "name": "Passed test", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_1 = { @@ -317,7 +293,7 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_2 = { @@ -325,12 +301,10 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -371,9 +345,7 @@ def test_add_run(self, api_request_handler: ApiRequestHandler, requests_mock): requests_mock.post(create_url(f"add_run/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_run(project_id, run_name) - assert ( - mocked_response["id"] == resources_added - ), "Added run id doesn't match mocked response id" + assert mocked_response["id"] == resources_added, "Added run id doesn't match mocked response id" assert error == "", "Error occurred in add_case" @pytest.mark.api_handler @@ -394,40 +366,37 @@ def test_add_results(self, api_request_handler: ApiRequestHandler, requests_mock "version": "1.0RC1", } ] - requests_mock.post( - create_url(f"add_results_for_cases/{run_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_results_for_cases/{run_id}"), json=mocked_response) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 4, - 'case_id': 1, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 
'Fail To Login With Invalid Password' - } - ] + "id": 4, + "case_id": 1, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) attachments_mock_response = {"attachment_id": 123} - requests_mock.post( - create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response - ) + requests_mock.post(create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response) with patch("builtins.open", mock_open()) as mock_file: resources_added, error, results_added = api_request_handler.add_results(run_id) assert [mocked_response] == resources_added, "Invalid response from add_results" assert error == "", "Error occurred in add_results" - assert results_added == len(mocked_response), \ - f"Expected {len(mocked_response)} results to be added but got {results_added} instead." + assert results_added == len( + mocked_response + ), f"Expected {len(mocked_response)} results to be added but got {results_added} instead." mock_file.assert_any_call("./path1", "rb") mock_file.assert_any_call("./path2", "rb") @@ -446,12 +415,10 @@ def test_close_run(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in close_run" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": None, @@ -459,16 +426,14 @@ def test_check_missing_test_cases_ids_true( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } requests_mock.get( create_url(f"get_cases/{project_id}&suite_id={suite_id}"), json=mocked_response_page_1, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ @@ -476,27 +441,24 @@ def test_check_missing_test_cases_ids_true( "case_id": 1, "custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, - "custom_automation_id": - "Skipped test.testCase2", + "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" - } + "title": "testCase2", + }, ] ) assert missing_ids, "There is one missing test case" assert error == "", "Error occurred in check" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = 
mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": f"/api/v2/get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1", @@ -504,7 +466,7 @@ def test_check_missing_test_cases_ids_false( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } mocked_response_page_2 = { @@ -521,29 +483,22 @@ def test_check_missing_test_cases_ids_false( create_url(f"get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1"), json=mocked_response_page_2, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ { "case_id": 1, "custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" + "title": "testCase2", }, - { - "case_id": 1, - "custom_automation_id": "Passed test.testCase3", - "section_id": 2, - "title": "testCase3" - } + {"case_id": 1, "custom_automation_id": "Passed test.testCase3", "section_id": 2, "title": "testCase3"}, ] ) assert not missing_ids, "No missing ids" @@ -560,35 +515,30 @@ def test_get_suite_ids(self, api_request_handler: ApiRequestHandler, requests_mo requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.get_suite_ids(project_id) assert ( - resources_added[0] == mocked_response[0]["id"] and - resources_added[1] == mocked_response[1]["id"] + resources_added[0] == mocked_response[0]["id"] and resources_added[1] == mocked_response[1]["id"] ), "ID in response doesn't match mocked response" @pytest.mark.api_handler - def test_get_suite_ids_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_get_suite_ids_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) - + + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) + suite_ids, error = api_request_handler.get_suite_ids(project_id) - + assert suite_ids == [], "Should return empty list on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." 
+ ), "Should return connection error message" @pytest.mark.api_handler - def test_resolve_suite_id_using_name( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_resolve_suite_id_using_name(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_name = "Suite2" api_request_handler.suites_data_from_provider.name = suite_name - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "offset": 0, @@ -598,43 +548,36 @@ def test_resolve_suite_id_using_name( "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } - + requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) - + suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) - + assert suite_id == 5, "Should return the correct suite ID for matching name with pagination" assert error == "", "Should have no error message" - + update_data_mock.assert_called_once_with([{"suite_id": 5, "name": "Suite2"}]) @pytest.mark.api_handler - def test_resolve_suite_id_using_name_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_resolve_suite_id_using_name_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) assert suite_id == -1, "Should return -1 on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" - + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." + ), "Should return connection error message" @pytest.mark.api_handler - def test_return_project_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_error(self, api_request_handler: ApiRequestHandler, requests_mock): - requests_mock.get( - create_url("get_projects"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url("get_projects"), exc=requests.exceptions.ConnectTimeout) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=-3, suite_mode=-1, @@ -643,9 +586,7 @@ def test_return_project_error( ), "Get project should return proper project data object with error" @pytest.mark.api_handler - def test_add_suite_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_suite_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 api_request_handler.suites_data_from_provider.suite_id = None @@ -658,15 +599,12 @@ def test_add_suite_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_sections_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_sections_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 requests_mock.post( create_url(f"add_section/{project_id}"), @@ -676,20 +614,16 @@ def test_add_sections_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - is None + api_request_handler.suites_data_from_provider.testsections[1].section_id is None ), "No resources should be added to DataProvider" @pytest.mark.api_handler - def test_add_section_and_cases_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, @@ -702,7 +636,7 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } mocked_response_for_case_2 = { @@ -710,12 +644,10 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "Passed test.testCase3" + "custom_automation_id": "Passed test.testCase3", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -735,65 +667,55 @@ def test_add_section_and_cases_error( mocked_response_for_case_1["id"], ], "Added case id doesn't match mocked response id" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_results_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_results_error(self, api_request_handler: ApiRequestHandler, requests_mock): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 18319, - 'case_id': 6086, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 18319, + "case_id": 6086, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) resources_added, error, results_added = api_request_handler.add_results(run_id) assert resources_added == [], "Expected empty list of added resources" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert results_added == 0, "Expected 0 resources to be added." @pytest.mark.api_handler - def test_add_results_keyboard_interrupt( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_add_results_keyboard_interrupt(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) - mocker.patch( - "trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt - ) + mocker.patch("trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt) with pytest.raises(KeyboardInterrupt) as exception: api_request_handler.add_results(run_id) @pytest.mark.api_handler - def test_add_suite_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_suite_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = { "description": "..", @@ -818,9 +740,7 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): "description": "Some description", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert error == "", "There should be no error in verification." @@ -828,45 +748,34 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert ( - error - == "Data verification failed. Server added different resource than expected." + error == "Data verification failed. Server added different resource than expected." ), "There should be error in verification." 
@pytest.mark.api_handler - def test_add_case_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_case_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): mocked_response_for_case = { "id": 3, "suite_id": 4, "section_id": 1234, "title": "testCase2", "estimate": "30s", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } requests_mock.post( create_url(f"add_case/{mocked_response_for_case['section_id']}"), json=mocked_response_for_case, ) - del api_request_handler_verify.suites_data_from_provider.testsections[ - 1 - ].testcases[0] + del api_request_handler_verify.suites_data_from_provider.testsections[1].testcases[0] resources_added, error = api_request_handler_verify.add_cases() assert error == "", "There should be no error in verification." mocked_response_for_case["estimate"] = "60s" - api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[ - 1 - ].case_id = None + api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[1].case_id = None resources_added, error = api_request_handler_verify.add_cases() - assert ( - error == FAULT_MAPPING["data_verification_error"] - ), "There should be error in verification." + assert error == FAULT_MAPPING["data_verification_error"], "There should be error in verification." @pytest.mark.api_handler - def test_delete_section( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_section(self, api_request_handler_verify: ApiRequestHandler, requests_mock): sections_id = [{"section_id": 1}] mocked_response_for_case = {"success": 200} @@ -879,9 +788,7 @@ def test_delete_section( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_suite( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_suite(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 mocked_response_for_case = {"success": 200} @@ -894,9 +801,7 @@ def test_delete_suite( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_cases( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_cases(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 cases = [{"case_id": 1}] mocked_response_for_case = {"success": 200} @@ -906,15 +811,11 @@ def test_delete_cases( json=mocked_response_for_case, ) - resources_added, error = api_request_handler_verify.delete_cases( - suite_id, cases - ) + resources_added, error = api_request_handler_verify.delete_cases(suite_id, cases) assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_run( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, requests_mock): run_id = 1 mocked_response_for_case = {"success": 200} @@ -925,3 +826,239 @@ def test_delete_run( resources_added, error = api_request_handler_verify.delete_run(run_id) assert error == "", "There should be no error in verification." 
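The tests added below exercise the `update_run` change made later in this patch: when a run has `include_all=False`, the handler is expected to send the union of the run's existing case IDs and the report's case IDs, and to omit `case_ids` entirely when `include_all=True`. A minimal, self-contained sketch of that merging rule follows; the `merge_case_ids` helper name is illustrative only and is not part of trcli.

```python
from typing import List, Optional


def merge_case_ids(
    include_all: bool,
    existing_case_ids: List[int],
    report_case_ids: List[int],
) -> Optional[List[int]]:
    """Illustrative only: mirrors the case_ids handling the new tests assert on.

    When include_all is True, TestRail manages the case list itself, so no
    case_ids should be sent. Otherwise the payload carries the union of the
    cases already in the run and the cases present in the report.
    """
    if include_all:
        return None  # caller should omit "case_ids" from the payload
    return sorted(set(existing_case_ids) | set(report_case_ids))


# Mirrors test_update_run_with_include_all_false_plan_with_config:
# existing run cases 188, 180, 191 must be preserved alongside report cases.
assert set(merge_case_ids(False, [188, 180, 191], [191, 200])) >= {188, 180, 191}
assert merge_case_ids(True, [1, 2], [3]) is None
```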
+ + @pytest.mark.api_handler + def test_update_run_with_include_all_false_standalone(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test update_run for standalone run with include_all=false""" + run_id = 100 + run_name = "Updated Test Run" + + # Mock get_run response - standalone run (no plan_id), include_all=false + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": None, + "config_ids": [], + } + + # Mock get_tests response - existing cases in run + get_tests_response = { + "offset": 0, + "limit": 250, + "size": 2, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1, "case_id": 1, "status_id": 1}, {"id": 2, "case_id": 2, "status_id": 1}], + } + + # Mock update_run response + update_run_response = {"id": run_id, "name": run_name} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response) + requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["id"] == run_id, "Run ID should match" + + # Verify the payload sent to update_run + request_history = requests_mock.request_history + update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0] + payload = update_request.json() + + assert payload["include_all"] == False, "include_all should be False" + assert "case_ids" in payload, "case_ids should be present" + # Should contain union of existing (1, 2) and report cases + assert set(payload["case_ids"]) >= {1, 2}, "Should include existing case IDs" + + @pytest.mark.api_handler + def test_update_run_with_include_all_false_plan_with_config( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + """Test update_run for run in plan with config and include_all=false (the bug scenario)""" + run_id = 200 + run_name = "Updated Test Run in Plan" + + # Mock get_run response - run in plan with config, include_all=false + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": 10, + "config_ids": [5, 6], # Has configs - will use update_run_in_plan_entry + } + + # Mock get_tests response - existing cases + get_tests_response = { + "offset": 0, + "limit": 250, + "size": 3, + "_links": {"next": None, "prev": None}, + "tests": [ + {"id": 1, "case_id": 188, "status_id": 1}, + {"id": 2, "case_id": 180, "status_id": 1}, + {"id": 3, "case_id": 191, "status_id": 1}, + ], + } + + # Mock update_run_in_plan_entry response + update_run_response = {"id": run_id, "name": run_name} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response) + requests_mock.post(create_url(f"update_run_in_plan_entry/{run_id}"), json=update_run_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["id"] == run_id, "Run ID should match" + + # Verify the payload sent to update_run_in_plan_entry + request_history = requests_mock.request_history + update_request = [r for r in request_history if "update_run_in_plan_entry" in r.url][0] + 
payload = update_request.json() + + # THIS IS THE CRITICAL FIX - must include include_all=False + assert payload["include_all"] == False, "include_all must be False (fixes the bug)" + assert "case_ids" in payload, "case_ids should be present" + # Should contain union of existing (188, 180, 191) and report cases + assert set(payload["case_ids"]) >= {188, 180, 191}, "Should preserve existing case IDs" + + @pytest.mark.api_handler + def test_update_run_with_include_all_true_preserves_setting( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + """Test update_run preserves include_all=true and doesn't send case_ids""" + run_id = 300 + run_name = "Updated Run with Include All" + + # Mock get_run response - include_all=true + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": True, # Run includes all cases + "plan_id": None, + "config_ids": [], + } + + # Mock update_run response + update_run_response = {"id": run_id, "name": run_name, "include_all": True} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["include_all"] == True, "include_all should be preserved" + + # Verify the payload sent to update_run + request_history = requests_mock.request_history + update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0] + payload = update_request.json() + + assert payload["include_all"] == True, "include_all should be True" + assert "case_ids" not in payload, "case_ids should NOT be present when include_all=True" + + @pytest.mark.api_handler + def test_update_run_handles_get_tests_error(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test update_run handles errors from get_tests gracefully""" + run_id = 400 + run_name = "Test Run" + + # Mock get_run response - include_all=false + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": None, + "config_ids": [], + } + + # Mock get_tests to return error (403 Forbidden, for example) + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), status_code=403, json={"error": "Access denied"}) + + # Execute update_run - should fail gracefully + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert run_data is None, "run_data should be None on error" + assert error is not None, "Error message should be present" + assert "Failed to get tests in run" in error, "Error should indicate get_tests failure" + + @pytest.mark.api_handler + def test_update_run_with_include_all_false_plan_without_config( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + """Test update_run for run in plan without config uses update_plan_entry""" + run_id = 500 + run_name = "Updated Test Run in Plan No Config" + plan_id = 20 + entry_id = "abc-123" + + # Mock get_run response - run in plan without config + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": plan_id, + "config_ids": [], # No configs - will use 
update_plan_entry + } + + # Mock get_tests response + get_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1, "case_id": 50, "status_id": 1}], + } + + # Mock get_plan response + get_plan_response = { + "id": plan_id, + "entries": [{"id": entry_id, "runs": [{"id": run_id, "entry_id": entry_id}]}], + } + + # Mock update_plan_entry response + update_plan_response = {"id": run_id, "name": run_name} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response) + requests_mock.get(create_url(f"get_plan/{plan_id}"), json=get_plan_response) + requests_mock.post(create_url(f"update_plan_entry/{plan_id}/{entry_id}"), json=update_plan_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["id"] == run_id, "Run ID should match" + + # Verify update_plan_entry was called with correct payload + request_history = requests_mock.request_history + update_request = [r for r in request_history if f"update_plan_entry/{plan_id}/{entry_id}" in r.url][0] + payload = update_request.json() + + assert payload["include_all"] == False, "include_all should be False" + assert "case_ids" in payload, "case_ids should be present" + assert 50 in payload["case_ids"], "Should include existing case ID" diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 3dcd196..8c892c2 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -7,7 +7,9 @@ from trcli.cli import Environment from trcli.constants import ( ProjectErrors, - FAULT_MAPPING, OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID, + FAULT_MAPPING, + OLD_SYSTEM_NAME_AUTOMATION_ID, + UPDATED_SYSTEM_NAME_AUTOMATION_ID, ) from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData @@ -33,7 +35,7 @@ def __init__( environment.case_fields, environment.run_description, environment.result_fields, - environment.section_id + environment.section_id, ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) @@ -48,11 +50,11 @@ def check_automation_id_field(self, project_id: int) -> Union[str, None]: if not response.error_message: fields: List = response.response_text automation_id_field = next( - filter( + filter( lambda x: x["system_name"] in [OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID], - fields + fields, ), - None + None, ) if automation_id_field: if automation_id_field["is_active"] is False: @@ -79,11 +81,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project """ projects_data, error = self.__get_all_projects() if not error: - available_projects = [ - project - for project in projects_data - if project["name"] == project_name - ] + available_projects = [project for project in projects_data if project["name"] == project_name] if len(available_projects) == 1: return ProjectData( @@ -94,9 +92,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project elif len(available_projects) > 1: if project_id in [project["id"] for project in available_projects]: project_index = [ - index - for index, project in enumerate(available_projects) - if project["id"] == project_id + index for index, project 
in enumerate(available_projects) if project["id"] == project_id ][0] return ProjectData( project_id=int(available_projects[project_index]["id"]), @@ -131,11 +127,7 @@ def check_suite_id(self, project_id: int) -> Tuple[bool, str]: suite_id = self.suites_data_from_provider.suite_id suites_data, error = self.__get_all_suites(project_id) if not error: - available_suites = [ - suite - for suite in suites_data - if suite["id"] == suite_id - ] + available_suites = [suite for suite in suites_data if suite["id"] == suite_id] return ( (True, "") if len(available_suites) > 0 @@ -207,9 +199,7 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_suite/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -224,9 +214,11 @@ def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(suite_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(suite_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: @@ -246,20 +238,24 @@ def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: if self.environment.section_id: if section.section_id in sections_by_id.keys(): section_json = sections_by_id[section.section_id] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True if section.name in sections_by_name.keys(): section_json = sections_by_name[section.name] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) else: missing_test_sections = True self.data_provider.update_data(section_data=section_data) @@ -281,9 +277,7 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: response = self.client.send_post(f"add_section/{project_id}", body) if not response.error_message: responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): + if not self.response_verifier.verify_returned_data(body, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] break @@ -298,9 +292,11 @@ def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: } for response in responses ] - self.data_provider.update_data(section_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" + ( + self.data_provider.update_data(section_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) return returned_resources, error_message def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: @@ -327,12 +323,14 @@ def 
check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: aut_id = test_case.custom_automation_id if aut_id in test_cases_by_aut_id.keys(): case = test_cases_by_aut_id[aut_id] - test_case_data.append({ - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id - }) + test_case_data.append( + { + "case_id": case["id"], + "section_id": case["section_id"], + "title": case["title"], + OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, + } + ) else: missing_cases_number += 1 self.data_provider.update_data(case_data=test_case_data) @@ -386,25 +384,25 @@ def add_cases(self) -> Tuple[List[dict], str]: { "case_id": response.response_text["id"], "section_id": response.response_text["section_id"], - "title": response.response_text["title"] + "title": response.response_text["title"], } for response in responses ] return returned_resources, error_message def add_run( - self, - project_id: int, - run_name: str, - milestone_id: int = None, - start_date: str = None, - end_date: str = None, - plan_id: int = None, - config_ids: List[int] = None, - assigned_to_id: int = None, - include_all: bool = False, - refs: str = None, - case_ids: List[int] = None, + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, ) -> Tuple[int, str]: """ Creates a new test run. @@ -432,7 +430,7 @@ def add_run( "name": add_run_data["name"], "suite_id": add_run_data["suite_id"], "config_ids": config_ids, - "runs": [add_run_data] + "runs": [add_run_data], } else: entry_data = add_run_data @@ -440,8 +438,16 @@ def add_run( run_id = response.response_text["runs"][0]["id"] return run_id, response.error_message - def update_run(self, run_id: int, run_name: str, start_date: str = None, - end_date: str = None, milestone_id: int = None, refs: str = None, refs_action: str = 'add') -> Tuple[dict, str]: + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: """ Updates an existing run :run_id: run id @@ -453,12 +459,13 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, run_response.error_message - + existing_description = run_response.response_text.get("description", "") existing_refs = run_response.response_text.get("refs", "") - add_run_data = self.data_provider.add_run(run_name, start_date=start_date, - end_date=end_date, milestone_id=milestone_id) + add_run_data = self.data_provider.add_run( + run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id + ) add_run_data["description"] = existing_description # Retain the current description # Handle references based on action @@ -468,12 +475,23 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, else: add_run_data["refs"] = existing_refs # Keep existing refs if none provided - run_tests, error_message = self.__get_all_tests_in_run(run_id) - run_case_ids = [test["case_id"] for test in run_tests] - report_case_ids = add_run_data["case_ids"] - joint_case_ids = list(set(report_case_ids + run_case_ids)) - add_run_data["case_ids"] = joint_case_ids - + existing_include_all = 
run_response.response_text.get("include_all", False) + add_run_data["include_all"] = existing_include_all + + if not existing_include_all: + # Only manage explicit case_ids when include_all=False + run_tests, error_message = self.__get_all_tests_in_run(run_id) + if error_message: + return None, f"Failed to get tests in run: {error_message}" + run_case_ids = [test["case_id"] for test in run_tests] + report_case_ids = add_run_data["case_ids"] + joint_case_ids = list(set(report_case_ids + run_case_ids)) + add_run_data["case_ids"] = joint_case_ids + else: + # include_all=True: TestRail includes all suite cases automatically + # Do NOT send case_ids array (TestRail ignores it anyway) + add_run_data.pop("case_ids", None) + plan_id = run_response.response_text["plan_id"] config_ids = run_response.response_text["config_ids"] if not plan_id: @@ -505,29 +523,29 @@ def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> """ if not existing_refs: existing_refs = "" - - if action == 'update': + + if action == "update": # Replace all references with new ones return new_refs - elif action == 'delete': + elif action == "delete": if not new_refs: # Delete all references return "" else: # Delete specific references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + refs_to_delete = [ref.strip() for ref in new_refs.split(",") if ref.strip()] updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ','.join(updated_list) + return ",".join(updated_list) else: # action == 'add' (default) # Add new references to existing ones if not existing_refs: return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + new_list = [ref.strip() for ref in new_refs.split(",") if ref.strip()] # Avoid duplicates combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ','.join(combined_list) + return ",".join(combined_list) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: """ @@ -540,11 +558,11 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic run_response = self.client.send_get(f"get_run/{run_id}") if run_response.error_message: return None, [], [], run_response.error_message - + existing_refs = run_response.response_text.get("refs", "") or "" - + # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] + existing_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] # Deduplicate input references new_list = [] seen = set() @@ -553,28 +571,33 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic if ref_clean and ref_clean not in seen: new_list.append(ref_clean) seen.add(ref_clean) - + # Determine which references are new vs duplicates added_refs = [ref for ref in new_list if ref not in existing_list] skipped_refs = [ref for ref in new_list if ref in existing_list] - + # If no new references to add, return current state if not added_refs: return run_response.response_text, added_refs, skipped_refs, None - 
+ # Combine references combined_list = existing_list + added_refs - combined_refs = ','.join(combined_list) - + combined_refs = ",".join(combined_list) + if len(combined_refs) > 250: - return None, [], [], f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit" - + return ( + None, + [], + [], + f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit", + ) + update_data = {"refs": combined_refs} - + # Determine the correct API endpoint based on plan membership plan_id = run_response.response_text.get("plan_id") config_ids = run_response.response_text.get("config_ids") - + if not plan_id: # Standalone run update_response = self.client.send_post(f"update_run/{run_id}", update_data) @@ -586,7 +609,7 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic plan_response = self.client.send_get(f"get_plan/{plan_id}") if plan_response.error_message: return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - + # Find the entry_id for this run entry_id = None for entry in plan_response.response_text.get("entries", []): @@ -596,19 +619,21 @@ def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dic break if entry_id: break - + if not entry_id: return None, [], [], f"Could not find plan entry for run {run_id}" - + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - + if update_response.error_message: return None, [], [], update_response.error_message - + updated_run_response = self.client.send_get(f"get_run/{run_id}") return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message - def update_existing_case_references(self, case_id: int, junit_refs: str, strategy: str = "append") -> Tuple[bool, str, List[str], List[str]]: + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: """ Update existing case references with values from JUnit properties. 
:param case_id: ID of the test case @@ -618,62 +643,69 @@ def update_existing_case_references(self, case_id: int, junit_refs: str, strateg """ if not junit_refs or not junit_refs.strip(): return True, None, [], [] # No references to process - + # Parse and validate JUnit references, deduplicating input junit_ref_list = [] seen = set() - for ref in junit_refs.split(','): + for ref in junit_refs.split(","): ref_clean = ref.strip() if ref_clean and ref_clean not in seen: junit_ref_list.append(ref_clean) seen.add(ref_clean) - + if not junit_ref_list: return False, "No valid references found in JUnit property", [], [] - + # Get current case data case_response = self.client.send_get(f"get_case/{case_id}") if case_response.error_message: return False, case_response.error_message, [], [] - - existing_refs = case_response.response_text.get('refs', '') or '' - + + existing_refs = case_response.response_text.get("refs", "") or "" + if strategy == "replace": # Replace strategy: use JUnit refs as-is - new_refs = ','.join(junit_ref_list) + new_refs = ",".join(junit_ref_list) added_refs = junit_ref_list skipped_refs = [] else: # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] - + existing_ref_list = ( + [ref.strip() for ref in existing_refs.split(",") if ref.strip()] if existing_refs else [] + ) + # Determine which references are new vs duplicates added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - + # If no new references to add, return current state if not added_refs: return True, None, added_refs, skipped_refs - + # Combine references combined_list = existing_ref_list + added_refs - new_refs = ','.join(combined_list) - + new_refs = ",".join(combined_list) + # Validate 2000 character limit for test case references if len(new_refs) > 2000: - return False, f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", [], [] - + return ( + False, + f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", + [], + [], + ) + # Update the case update_data = {"refs": new_refs} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.error_message: return False, update_response.error_message, [], [] - + return True, None, added_refs, skipped_refs def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """ Getting test result id and upload attachments for it. 
""" + """Getting test result id and upload attachments for it.""" tests_in_run, error = self.__get_all_tests_in_run(run_id) if not error: for report_result in report_results: @@ -698,26 +730,18 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: responses = [] error_message = "" # Get pre-validated user IDs if available - user_ids = getattr(self.environment, '_validated_user_ids', []) - - add_results_data_chunks = self.data_provider.add_results_for_cases( - self.environment.batch_size, user_ids - ) + user_ids = getattr(self.environment, "_validated_user_ids", []) + + add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) # Get assigned count from data provider - assigned_count = getattr(self.data_provider, '_assigned_count', 0) - - results_amount = sum( - [len(results["results"]) for results in add_results_data_chunks] - ) + assigned_count = getattr(self.data_provider, "_assigned_count", 0) - with self.environment.get_progress_bar( - results_amount=results_amount, prefix="Adding results" - ) as progress_bar: + results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) + + with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: futures = { - executor.submit( - self.client.send_post, f"add_results_for_cases/{run_id}", body - ): body + executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body for body in add_results_data_chunks } responses, error_message = self.handle_futures( @@ -730,11 +754,7 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: # Iterate through futures to get all responses from done tasks (not cancelled) responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) responses = [response.response_text for response in responses] - results = [ - result - for results_list in responses - for result in results_list - ] + results = [result for results_list in responses for result in results_list] report_results_w_attachments = [] for results_data_chunk in add_results_data_chunks: for test_result in results_data_chunk["results"]: @@ -744,22 +764,22 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: attachments_count = 0 for result in report_results_w_attachments: attachments_count += len(result["attachments"]) - self.environment.log(f"Uploading {attachments_count} attachments " - f"for {len(report_results_w_attachments)} test results.") + self.environment.log( + f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." 
+ ) self.upload_attachments(report_results_w_attachments, results, run_id) else: self.environment.log(f"No attachments found to upload.") - + # Log assignment results if assignment was performed if user_ids: - total_failed = getattr(self.data_provider, '_total_failed_count', assigned_count) + total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) if assigned_count > 0: self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") else: self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + return responses, error_message, progress_bar.n def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -776,9 +796,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st if action_string == "add_case": arguments = arguments.to_dict() arguments.pop("case_id") - if not self.response_verifier.verify_returned_data( - arguments, response.response_text - ): + if not self.response_verifier.verify_returned_data(arguments, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] self.__cancel_running_futures(futures, action_string) @@ -786,9 +804,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st progress_bar.update(1) else: error_message = response.error_message - self.environment.log( - f"\nError during {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nError during {action_string}. Trying to cancel scheduled tasks.") self.__cancel_running_futures(futures, action_string) break else: @@ -826,9 +842,7 @@ def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: responses = [] error_message = "" for section in added_sections: - response = self.client.send_post( - f"delete_section/{section['section_id']}", payload={} - ) + response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) if not response.error_message: responses.append(response.response_text) else: @@ -868,45 +882,52 @@ def retrieve_results_after_cancelling(futures) -> list: def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ Validates a user email and returns the user ID if valid. 
- + :param email: User email to validate :returns: Tuple with user ID (or None if not found) and error message """ if not email or not email.strip(): return None, "Email cannot be empty" - + email = email.strip() # Use proper URL encoding for the query parameter import urllib.parse + encoded_email = urllib.parse.quote_plus(email) response = self.client.send_get(f"get_user_by_email&email={encoded_email}") - + if response.error_message: # Map TestRail's email validation error to our expected format if "Field :email is not a valid email address" in response.error_message: return None, f"User not found: {email}" return None, response.error_message - + if response.status_code == 200: try: user_data = response.response_text - if isinstance(user_data, dict) and 'id' in user_data: - return user_data['id'], "" + if isinstance(user_data, dict) and "id" in user_data: + return user_data["id"], "" else: return None, f"Invalid response format for user: {email}" except (KeyError, TypeError): return None, f"Invalid response format for user: {email}" elif response.status_code == 400: # Check if the response contains the email validation error - if (hasattr(response, 'response_text') and response.response_text and - isinstance(response.response_text, dict) and - "Field :email is not a valid email address" in str(response.response_text.get('error', ''))): + if ( + hasattr(response, "response_text") + and response.response_text + and isinstance(response.response_text, dict) + and "Field :email is not a valid email address" in str(response.response_text.get("error", "")) + ): return None, f"User not found: {email}" return None, f"User not found: {email}" else: # For other status codes, check if it's the email validation error - if (hasattr(response, 'response_text') and response.response_text and - "Field :email is not a valid email address" in str(response.response_text)): + if ( + hasattr(response, "response_text") + and response.response_text + and "Field :email is not a valid email address" in str(response.response_text) + ): return None, f"User not found: {email}" return None, f"API error (status {response.status_code}) when validating user: {email}" @@ -925,9 +946,7 @@ def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: return response def __cancel_running_futures(self, futures, action_string): - self.environment.log( - f"\nAborting: {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") for future in futures: future.cancel() @@ -936,33 +955,33 @@ def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], s Get all cases from all pages """ if suite_id is None: - return self.__get_all_entities('cases', f"get_cases/{project_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}") else: - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}") def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ Get all sections from all pages """ - return self.__get_all_entities('sections', f"get_sections/{project_id}&suite_id={suite_id}") + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}") def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ Get all tests from all pages """ - return self.__get_all_entities('tests', f"get_tests/{run_id}") + return self.__get_all_entities("tests", f"get_tests/{run_id}") def __get_all_projects(self) -> Tuple[List[dict], str]: """ Get all projects from all pages """ - return self.__get_all_entities('projects', f"get_projects") + return self.__get_all_entities("projects", f"get_projects") def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ Get all suites from all pages """ - return self.__get_all_entities('suites', f"get_suites/{project_id}") + return self.__get_all_entities("suites", f"get_suites/{project_id}") def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ @@ -979,9 +998,7 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ return response.response_text, response.error_message # Check if response is a string (JSON parse failed) if isinstance(response.response_text, str): - error_msg = FAULT_MAPPING["invalid_api_response"].format( - error_details=response.response_text[:200] - ) + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) return [], error_msg # Endpoints with pagination entities = entities + response.response_text[entity] @@ -1002,7 +1019,7 @@ def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: :returns: Tuple with created label data and error string """ # Use multipart/form-data like the working CURL command - files = {'title': (None, title)} + files = {"title": (None, title)} response = self.client.send_post(f"add_label/{project_id}", payload=None, files=files) return response.response_text, response.error_message @@ -1016,8 +1033,8 @@ def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict """ # Use multipart/form-data like add_label files = { - 'project_id': (None, str(project_id)), - 'title': (None, title) # Field name is 'title' (no colon) for form data + "project_id": (None, str(project_id)), + "title": (None, title), # Field name is 'title' (no colon) for form data } response = self.client.send_post(f"update_label/{label_id}", payload=None, files=files) return response.response_text, response.error_message @@ -1044,11 +1061,11 @@ def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tupl params.append(f"offset={offset}") if limit != 250: params.append(f"limit={limit}") - + url = f"get_labels/{project_id}" if params: url += "&" + "&".join(params) - + response = self.client.send_get(url) return response.response_text, response.error_message 
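The label endpoints above (`add_label`, `update_label`) now send multipart/form-data by passing `(None, value)` tuples through the client's `files` argument. As a rough illustration of what that produces on the wire (assuming the client ultimately delegates to `requests`), a plain form field named `title` is emitted without a filename; the URL below is a placeholder, not a real instance.

```python
import requests

# Illustration only: a (None, value) tuple in `files` becomes a plain
# multipart/form-data field, matching files = {"title": (None, title)}
# used by add_label. URL and label are placeholders.
req = requests.Request(
    "POST",
    "https://example.testrail.io/index.php?/api/v2/add_label/1",
    files={"title": (None, "Test Label")},
).prepare()

print(req.headers["Content-Type"])  # multipart/form-data; boundary=...
print(req.body.decode())  # Content-Disposition: form-data; name="title" ... Test Label
```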
@@ -1070,16 +1087,19 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: """ # Send as form data with JSON array format import json + label_ids_json = json.dumps(label_ids) files = {"label_ids": (None, label_ids_json)} response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message - def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: + def add_labels_to_cases( + self, case_ids: List[int], title: str, project_id: int, suite_id: int = None + ) -> Tuple[dict, str]: """ Add a label to multiple test cases - + :param case_ids: List of test case IDs :param title: Label title (max 20 characters) :param project_id: Project ID for validation @@ -1087,122 +1107,113 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_cases': [], - 'failed_cases': [], - 'max_labels_reached': [], - 'case_not_found': [] - } - + results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} + # Check if project is multi-suite by getting all cases without suite_id all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) if error_message: return results, error_message - + # Check if project has multiple suites suite_ids = set() for case in all_cases_no_suite: - if 'suite_id' in case and case['suite_id']: - suite_ids.add(case['suite_id']) - + if "suite_id" in case and case["suite_id"]: + suite_ids.add(case["suite_id"]) + # If project has multiple suites and no suite_id provided, require it if len(suite_ids) > 1 and suite_id is None: return results, "This project is multisuite, suite id is required" - + # Get all cases to validate that the provided case IDs exist all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return results, error_message - + # Create a set of existing case IDs for quick lookup - existing_case_ids = {case['id'] for case in all_cases} - + existing_case_ids = {case["id"] for case in all_cases} + # Validate case IDs and separate valid from invalid ones invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - + # Record invalid case IDs for case_id in invalid_case_ids: - results['case_not_found'].append(case_id) - + results["case_not_found"].append(case_id) + # If no valid case IDs, return early if not valid_case_ids: return results, "" - + # Check if label exists or create it existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + # Collect case data and validate constraints cases_to_update = [] for 
case_id in valid_case_ids: # Get current case to check existing labels case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: - results['failed_cases'].append({ - 'case_id': case_id, - 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" - }) + results["failed_cases"].append( + {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} + ) continue - + case_data = case_response.response_text - current_labels = case_data.get('labels', []) - + current_labels = case_data.get("labels", []) + # Check if label already exists on this case - if any(label.get('id') == label_id for label in current_labels): - results['successful_cases'].append({ - 'case_id': case_id, - 'message': f"Label '{title}' already exists on case {case_id}" - }) + if any(label.get("id") == label_id for label in current_labels): + results["successful_cases"].append( + {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} + ) continue - + # Check maximum labels limit (10) if len(current_labels) >= 10: - results['max_labels_reached'].append(case_id) + results["max_labels_reached"].append(case_id) continue - + # Prepare case for update - existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({ - 'case_id': case_id, - 'labels': updated_label_ids - }) - + cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) + # Update cases using appropriate endpoint if len(cases_to_update) == 1: # Single case: use update_case/{case_id} case_info = cases_to_update[0] - case_update_data = {'labels': case_info['labels']} - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) elif len(cases_to_update) > 1: # Multiple cases: use update_cases/{suite_id} # Need to determine suite_id from the cases @@ -1210,62 +1221,72 @@ def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, if not case_suite_id: # Get suite_id from the first case if not provided first_case = all_cases[0] if all_cases else None - case_suite_id = first_case.get('suite_id') if first_case else None - + case_suite_id = first_case.get("suite_id") if first_case else None + if not case_suite_id: # Fall back to individual updates if no suite_id available for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if 
update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) else: # Batch update using update_cases/{suite_id} batch_update_data = { - 'case_ids': [case_info['case_id'] for case_info in cases_to_update], - 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases + "case_ids": [case_info["case_id"] for case_info in cases_to_update], + "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases } - + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - + if batch_response.status_code == 200: for case_info in cases_to_update: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: # If batch update fails, fall back to individual updates for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) - + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + return results, "" - def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + def get_cases_by_label( + self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None + ) -> Tuple[List[dict], str]: """ Get test cases filtered by label ID or title - + :param project_id: Project ID :param suite_id: Suite ID (optional) :param label_ids: List of label IDs to filter by @@ -1276,234 +1297,228 @@ def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: L all_cases, error_message = self.__get_all_cases(project_id, suite_id) if error_message: return [], error_message - + # If filtering by title, first get the label ID target_label_ids = label_ids or [] if label_title and not target_label_ids: labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - 
target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Filter cases that have any of the target labels matching_cases = [] for case in all_cases: - case_labels = case.get('labels', []) - case_label_ids = [label.get('id') for label in case_labels] - + case_labels = case.get("labels", []) + case_label_ids = [label.get("id") for label in case_labels] + # Check if any of the target label IDs are present in this case if any(label_id in case_label_ids for label_id in target_label_ids): matching_cases.append(case) - + return matching_cases, "" - def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: """ Add labels to multiple tests - + :param test_ids: List of test IDs :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) :param project_id: Project ID for validation :returns: Tuple with response data and error string """ # Initialize results structure - results = { - 'successful_tests': [], - 'failed_tests': [], - 'max_labels_reached': [], - 'test_not_found': [] - } - + results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + # Normalize titles to a list if isinstance(titles, str): title_list = [titles] else: title_list = titles - + # At this point, title_list should already be validated by the CLI # Just ensure we have clean titles title_list = [title.strip() for title in title_list if title.strip()] - + if not title_list: return {}, "No valid labels provided" - + # Validate test IDs by getting run information for each test valid_test_ids = [] for test_id in test_ids: # Get test information to validate it exists test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) continue - + test_data = test_response.response_text # Validate that the test belongs to the correct project - run_id = test_data.get('run_id') + run_id = test_data.get("run_id") if run_id: run_response = self.client.send_get(f"get_run/{run_id}") if run_response.status_code == 200: run_data = run_response.response_text - if run_data.get('project_id') == project_id: + if run_data.get("project_id") == project_id: valid_test_ids.append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) + results["test_not_found"].append(test_id) else: - results['test_not_found'].append(test_id) - + results["test_not_found"].append(test_id) + # If no valid test IDs, return early if not valid_test_ids: return results, "" - + # Check if labels exist or create them existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - + # Process each title to get/create label IDs label_ids = [] label_id_to_title = {} # Map label IDs to their titles for title in title_list: # Find existing label with the same title label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') + for label in existing_labels.get("labels", []): + if 
label.get("title") == title: + label_id = label.get("id") break - + # Create label if it doesn't exist if label_id is None: label_data, error_message = self.add_label(project_id, title) if error_message: return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + if label_id: label_ids.append(label_id) label_id_to_title[label_id] = title - + # Collect test data and validate constraints tests_to_update = [] for test_id in valid_test_ids: # Get current test to check existing labels test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results['failed_tests'].append({ - 'test_id': test_id, - 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" - }) + results["failed_tests"].append( + {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} + ) continue - + test_data = test_response.response_text - current_labels = test_data.get('labels', []) - current_label_ids = [label.get('id') for label in current_labels if label.get('id')] - + current_labels = test_data.get("labels", []) + current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + new_label_ids = [] already_exists_titles = [] - + for label_id in label_ids: if label_id not in current_label_ids: new_label_ids.append(label_id) else: if label_id in label_id_to_title: already_exists_titles.append(label_id_to_title[label_id]) - + if not new_label_ids: - results['successful_tests'].append({ - 'test_id': test_id, - 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" - }) + results["successful_tests"].append( + { + "test_id": test_id, + "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", + } + ) continue - + # Check maximum labels limit (10) if len(current_label_ids) + len(new_label_ids) > 10: - results['max_labels_reached'].append(test_id) + results["max_labels_reached"].append(test_id) continue - + # Prepare test for update updated_label_ids = current_label_ids + new_label_ids - + new_label_titles = [] for label_id in new_label_ids: if label_id in label_id_to_title: new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append({ - 'test_id': test_id, - 'labels': updated_label_ids, - 'new_labels': new_label_ids, - 'new_label_titles': new_label_titles - }) - + + tests_to_update.append( + { + "test_id": test_id, + "labels": updated_label_ids, + "new_labels": new_label_ids, + "new_label_titles": new_label_titles, + } + ) + # Update tests using appropriate endpoint if len(tests_to_update) == 1: # Single test: use update_test/{test_id} test_info = tests_to_update[0] - test_update_data = {'labels': test_info['labels']} - + test_update_data = {"labels": test_info["labels"]} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added 
to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) else: # Multiple tests: use individual updates to ensure each test gets its specific labels for test_info in tests_to_update: - test_update_data = {'labels': test_info['labels']} + test_update_data = {"labels": test_info["labels"]} update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - + if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) + new_label_titles = test_info.get("new_label_titles", []) new_label_count = len(new_label_titles) - + if new_label_count == 1: message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" elif new_label_count > 1: message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" else: message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) - + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + return results, "" - def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: """ Get tests filtered by label ID or title from specific runs - + :param project_id: Project ID :param label_ids: List of label IDs to filter by :param label_title: Label title to filter by @@ -1516,14 +1531,14 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label labels_data, error_message = self.get_labels(project_id) if error_message: return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + if not target_label_ids: return [], "" # No label found is a valid case with 0 results - + # Get runs for the project (either all runs or specific run IDs) if run_ids: # Use specific run IDs - validate they exist by getting run details @@ -1539,67 +1554,65 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label runs_response = self.client.send_get(f"get_runs/{project_id}") if runs_response.status_code != 200: return [], runs_response.error_message - + runs_data = runs_response.response_text - runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data - + runs = runs_data.get("runs", []) if isinstance(runs_data, dict) else runs_data + # Collect all tests from all runs matching_tests = [] for run in runs: - run_id = 
run.get('id') + run_id = run.get("id") if not run_id: continue - + # Get tests for this run tests_response = self.client.send_get(f"get_tests/{run_id}") if tests_response.status_code != 200: continue # Skip this run if we can't get tests - + tests_data = tests_response.response_text - tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data - + tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + # Filter tests that have any of the target labels for test in tests: - test_labels = test.get('labels', []) - test_label_ids = [label.get('id') for label in test_labels] - + test_labels = test.get("labels", []) + test_label_ids = [label.get("id") for label in test_labels] + # Check if any of the target label IDs are present in this test if any(label_id in test_label_ids for label_id in target_label_ids): matching_tests.append(test) - + return matching_tests, "" def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: """ Get labels for specific tests - + :param test_ids: List of test IDs to get labels for :returns: Tuple with list of test label information and error string """ results = [] - + for test_id in test_ids: # Get test information test_response = self.client.send_get(f"get_test/{test_id}") if test_response.status_code != 200: - results.append({ - 'test_id': test_id, - 'error': f"Test {test_id} not found or inaccessible", - 'labels': [] - }) + results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) continue - + test_data = test_response.response_text - test_labels = test_data.get('labels', []) - - results.append({ - 'test_id': test_id, - 'title': test_data.get('title', 'Unknown'), - 'status_id': test_data.get('status_id'), - 'labels': test_labels, - 'error': None - }) - + test_labels = test_data.get("labels", []) + + results.append( + { + "test_id": test_id, + "title": test_data.get("title", "Unknown"), + "status_id": test_data.get("status_id"), + "labels": test_labels, + "error": None, + } + ) + return results, "" # Test case reference management methods @@ -1614,15 +1627,15 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + # Parse existing references existing_ref_list = [] if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references while preserving order deduplicated_input = [] seen = set() @@ -1631,24 +1644,24 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool if ref_clean and ref_clean not in seen: deduplicated_input.append(ref_clean) seen.add(ref_clean) - + # Add new references (avoid duplicates with existing) all_refs = existing_ref_list.copy() for ref in deduplicated_input: if ref not in all_refs: all_refs.append(ref) - + # Join all references - new_refs_string = ','.join(all_refs) - + new_refs_string = ",".join(all_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # 
Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1669,18 +1682,18 @@ def update_case_references(self, case_id: int, references: List[str]) -> Tuple[b if ref_clean and ref_clean not in seen: deduplicated_refs.append(ref_clean) seen.add(ref_clean) - + # Join references - new_refs_string = ','.join(deduplicated_refs) - + new_refs_string = ",".join(deduplicated_refs) + # Validate total character limit if len(new_refs_string) > 2000: return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - + # Update the test case with new references - update_data = {'refs': new_refs_string} + update_data = {"refs": new_refs_string} update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: @@ -1695,36 +1708,36 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = """ if specific_references is None: # Delete all references by setting refs to empty string - update_data = {'refs': ''} + update_data = {"refs": ""} else: # First get the current test case to retrieve existing references case_response = self.client.send_get(f"get_case/{case_id}") if case_response.status_code != 200: return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - + case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - + existing_refs = case_data.get("refs", "") or "" + if not existing_refs: # No references to delete return True, "" - + # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - + existing_ref_list = [ref.strip() for ref in existing_refs.split(",") if ref.strip()] + # Deduplicate input references for efficient processing refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) - + # Remove specific references remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] - + # Join remaining references - new_refs_string = ','.join(remaining_refs) - update_data = {'refs': new_refs_string} - + new_refs_string = ",".join(remaining_refs) + update_data = {"refs": new_refs_string} + # Update the test case update_response = self.client.send_post(f"update_case/{case_id}", update_data) - + if update_response.status_code == 200: return True, "" else: From 7f9127ccfa9277bc42b2dd8d295681a295da83b3 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 27 Nov 2025 13:25:31 +0800 Subject: [PATCH 08/10] Update pr-validation.yml --- .github/workflows/pr-validation.yml | 66 ++++++++++++++--------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 215d98a..e4c9881 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -23,30 +23,31 @@ jobs: PR_BODY="${{ github.event.pull_request.body }}" # Check if PR title or body contains issue reference - # Accepts: TRCLI-### (JIRA), GIT-### (GitHub), #123 (GitHub), issues/123 - if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+"; then - echo "issue_found=true" >> $GITHUB_OUTPUT + if echo "$PR_TITLE $PR_BODY" | grep -qE "(TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+)"; then + echo 
"issue_found=true" >> "$GITHUB_OUTPUT" - # Extract the issue key/number if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+"; then ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "TRCLI-[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=JIRA" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=JIRA" >> "$GITHUB_OUTPUT" + elif echo "$PR_TITLE $PR_BODY" | grep -qE "GIT-[0-9]+"; then ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "GIT-[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=GitHub" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=GitHub" >> "$GITHUB_OUTPUT" + elif echo "$PR_TITLE $PR_BODY" | grep -qE "#[0-9]+"; then ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "#[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=GitHub" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=GitHub" >> "$GITHUB_OUTPUT" + elif echo "$PR_BODY" | grep -qE "issues/[0-9]+"; then ISSUE_KEY=$(echo "$PR_BODY" | grep -oE "issues/[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=GitHub" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=GitHub" >> "$GITHUB_OUTPUT" fi else - echo "issue_found=false" >> $GITHUB_OUTPUT + echo "issue_found=false" >> "$GITHUB_OUTPUT" fi - name: Comment on PR if issue reference missing @@ -61,18 +62,15 @@ jobs: repo: context.repo.repo, body: `## ⚠️ Missing Issue Reference - This PR does not reference an issue. Please include a reference in either: + This PR does not reference an issue. Please include one. - **JIRA tickets:** - - PR title: "feat(api): TRCLI-123 Add new endpoint" - - PR description: "Resolves TRCLI-123" + **JIRA example:** + - TRCLI-123 - **GitHub issues:** - - PR title: "feat(api): GIT-123 Add new endpoint" - - PR description: "Resolves GIT-123" or "Fixes #123" - - Or link to the GitHub issue - - This helps with tracking and project management. 
Thank you!`
+          **GitHub examples:**
+          - GIT-123
+          - Fixes #123
+          - issues/123`
          })

    - name: Check PR Description Completeness
@@ -80,23 +78,22 @@ jobs:
      run: |
        PR_BODY="${{ github.event.pull_request.body }}"

-        # Check for required sections
        if echo "$PR_BODY" | grep -q "Issue being resolved"; then
-          echo "has_issue=true" >> $GITHUB_OUTPUT
+          echo "has_issue=true" >> "$GITHUB_OUTPUT"
        else
-          echo "has_issue=false" >> $GITHUB_OUTPUT
+          echo "has_issue=false" >> "$GITHUB_OUTPUT"
        fi

        if echo "$PR_BODY" | grep -q "Solution description"; then
-          echo "has_solution=true" >> $GITHUB_OUTPUT
+          echo "has_solution=true" >> "$GITHUB_OUTPUT"
        else
-          echo "has_solution=false" >> $GITHUB_OUTPUT
+          echo "has_solution=false" >> "$GITHUB_OUTPUT"
        fi

        if echo "$PR_BODY" | grep -q "Steps to test"; then
-          echo "has_test_steps=true" >> $GITHUB_OUTPUT
+          echo "has_test_steps=true" >> "$GITHUB_OUTPUT"
        else
-          echo "has_test_steps=false" >> $GITHUB_OUTPUT
+          echo "has_test_steps=false" >> "$GITHUB_OUTPUT"
        fi

    - name: Generate PR Validation Summary
@@ -107,6 +104,7 @@ jobs:
          const issueFound = '${{ steps.check_issue.outputs.issue_found }}' === 'true';
          const issueKey = '${{ steps.check_issue.outputs.issue_key }}';
          const issueType = '${{ steps.check_issue.outputs.issue_type }}';
+
          const hasIssue = '${{ steps.check_description.outputs.has_issue }}' === 'true';
          const hasSolution = '${{ steps.check_description.outputs.has_solution }}' === 'true';
          const hasTestSteps = '${{ steps.check_description.outputs.has_test_steps }}' === 'true';
@@ -124,9 +122,9 @@ jobs:
          | Solution Description | ${hasSolution ? '✅ Present' : '⚠️ Missing'} |
          | Test Steps | ${hasTestSteps ? '✅ Present' : '⚠️ Missing'} |

-          ${issueFound && hasSolution && hasTestSteps ? '✅ All checks passed!' : '⚠️ Some optional sections are missing. Consider adding them for better review context.'}
+          ${issueFound && hasSolution && hasTestSteps
+            ? '✅ All checks passed!'
+            : '⚠️ Some optional sections are missing.'}
          `;

-          await core.summary
-            .addRaw(summary)
-            .write();
+          await core.summary.addRaw(summary).write();

From 41c39fb876045db4c7c662674e1f5f7971450ead Mon Sep 17 00:00:00 2001
From: acuanico-tr-galt
Date: Tue, 9 Dec 2025 18:07:08 +0800
Subject: [PATCH 09/10] Updated readme and changelog for 1.12.5 release

---
 CHANGELOG.MD |  6 ++++--
 README.md    | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.MD b/CHANGELOG.MD
index bee7f26..95b5717 100644
--- a/CHANGELOG.MD
+++ b/CHANGELOG.MD
@@ -8,11 +8,13 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb

 ## [1.12.5]

-_released 11-23-2025
+_released 12-09-2025

 ### Fixed
+ - Added a new option --parallel-pagination for handling large test result uploads with optimized processing and improved error handling.
  - Fixed an issue where adding labels to project fails using label add command
- - Fixed an issue where failed attachment upload errors (due to file size being too large) is not displayed in standard output.
+ - Fixed an issue where failed attachment upload errors (e.g., due to file size being too large) are not displayed in standard output.
+ - Fixed an issue where test cases are deleted from existing test runs with configurations in a test plan

 ## [1.12.4]

diff --git a/README.md b/README.md
index 8f4dee5..aff70be 100644
--- a/README.md
+++ b/README.md
@@ -79,6 +79,8 @@ Options:
                               'username:password'.
   --noproxy                   Comma-separated list of hostnames to bypass the proxy
                               (e.g., localhost,127.0.0.1).
+  --parallel-pagination       Enable parallel pagination for faster case fetching
+                              (experimental).
   --help                      Show this message and exit.

 Commands:
@@ -1341,6 +1343,50 @@ Average time for uploading:
 - 2000 test cases was around 460 seconds
 - 5000 test cases was around 1000 seconds

+### Parallel Pagination (Experimental)
+
+The TestRail CLI includes an experimental `--parallel-pagination` option that significantly improves performance when fetching large numbers of test cases from TestRail. This feature uses parallel fetching to retrieve multiple pages of results concurrently, rather than fetching them sequentially.
+
+#### When to Use Parallel Pagination
+
+Use `--parallel-pagination` when:
+- Working with projects that have thousands of test cases
+- Fetching test cases takes a long time during operations
+- You need faster case matching and validation during result uploads
+
+#### How It Works
+
+When enabled, parallel pagination:
+1. Fetches the first page to determine total pages available
+2. Uses a thread pool (default: 10 workers set by `MAX_WORKERS_PARALLEL_PAGINATION` in `trcli/settings.py`) to fetch remaining pages concurrently
+3. Automatically handles batching to avoid overwhelming the server
+4. Combines all results efficiently for processing
+
+#### Usage
+
+Enable parallel pagination by adding the `--parallel-pagination` flag to any command:
+
+```shell
+# Enable parallel pagination for faster case fetching during result upload
+$ trcli parse_junit -f results.xml --parallel-pagination \
+  --host https://yourinstance.testrail.io --username <username> --password <password> \
+  --project "Your Project"
+
+# Example with parse_robot
+$ trcli parse_robot -f output.xml --parallel-pagination \
+  --host https://yourinstance.testrail.io --username <username> --password <password> \
+  --project "Your Project"
+```
+
+You can also enable this feature globally by setting `ENABLE_PARALLEL_PAGINATION = True` in `trcli/settings.py`. The CLI flag takes precedence over the settings file.
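+
+Conceptually, the flow resembles the sketch below. This is an illustration only, not the actual trcli implementation; the endpoint URL shape, the `PAGE_SIZE` value, and the `size`/`cases` response fields are assumptions made for the example.
+
+```python
+# Illustrative sketch of parallel pagination; not the actual trcli code.
+from concurrent.futures import ThreadPoolExecutor
+
+import requests
+
+MAX_WORKERS = 10  # mirrors the default MAX_WORKERS_PARALLEL_PAGINATION
+PAGE_SIZE = 250   # assumed page size for the bulk endpoints
+
+
+def fetch_page(base_url, auth, offset):
+    """Fetch one page of cases starting at the given offset (hypothetical URL shape)."""
+    response = requests.get(f"{base_url}&limit={PAGE_SIZE}&offset={offset}", auth=auth)
+    response.raise_for_status()
+    return response.json().get("cases", [])
+
+
+def fetch_all_cases(base_url, auth):
+    # 1. Fetch the first page sequentially; the assumed "size" field gives the total count.
+    first = requests.get(f"{base_url}&limit={PAGE_SIZE}&offset=0", auth=auth).json()
+    cases = list(first.get("cases", []))
+    total = first.get("size", len(cases))
+
+    # 2./3. Fetch the remaining offsets concurrently with a bounded thread pool.
+    offsets = range(PAGE_SIZE, total, PAGE_SIZE)
+    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool:
+        pages = list(pool.map(lambda off: fetch_page(base_url, auth, off), offsets))
+
+    # 4. Combine all pages into a single result list.
+    for page in pages:
+        cases.extend(page)
+    return cases
+```
+
+A bounded pool (rather than one thread per page) keeps the number of simultaneous requests to the TestRail server predictable; adjust `MAX_WORKERS_PARALLEL_PAGINATION` if you need a different balance between speed and server load.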
+ +#### Performance Considerations + +- This feature is most beneficial when dealing with large test case repositories (1000+ cases) +- The default worker count is set to 10, which provides a good balance between speed and server load +- For smaller projects with few test cases, the performance improvement may be negligible +- This is an experimental feature - please report any issues you encounter + Contributing ------------ From c6e759223056b3f25b6cc3b51c93ec10a873e33e Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 9 Dec 2025 18:28:21 +0800 Subject: [PATCH 10/10] TRCLI-207 fixed some failing unit tests --- tests/test_api_request_handler.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py index ca2079b..c22157c 100644 --- a/tests/test_api_request_handler.py +++ b/tests/test_api_request_handler.py @@ -1062,8 +1062,9 @@ def test_update_run_with_include_all_false_plan_without_config( assert payload["include_all"] == False, "include_all should be False" assert "case_ids" in payload, "case_ids should be present" assert 50 in payload["case_ids"], "Should include existing case ID" - - def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path): + + @pytest.mark.api_handler + def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path): """Test that 413 errors (file too large) are properly reported.""" run_id = 1 @@ -1137,6 +1138,10 @@ def test_upload_attachments_file_not_found(self, api_request_handler: ApiRequest # Mock get_tests endpoint mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, "tests": [{"id": 1001, "case_id": 100}], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) @@ -1147,4 +1152,3 @@ def test_upload_attachments_file_not_found(self, api_request_handler: ApiRequest # Call upload_attachments - should not raise exception api_request_handler.upload_attachments(report_results, results, run_id) -