From 66546574dbed50a47546c1709a23cd0097e080ae Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 5 Aug 2025 20:25:17 +0800 Subject: [PATCH 01/36] Update for release 1.12.0 --- CHANGELOG.MD | 7 +++++++ README.md | 13 +++++++++---- trcli/__init__.py | 2 +- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 8596f01..ca6fcdf 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -6,6 +6,13 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb - **MINOR**: New features that are backward-compatible. - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility. +## [1.12.0] + +_released 08-12-2025 + +### Added + - Added Label management facility for Projects, Test Runs and Test Cases + ## [1.11.0] _released 07-30-2025 diff --git a/README.md b/README.md index b4250ad..4ce9032 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ trcli ``` You should get something like this: ``` -TestRail CLI v1.11.0 +TestRail CLI v1.12.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) @@ -45,7 +45,7 @@ CLI general reference -------- ```shell $ trcli --help -TestRail CLI v1.11.0 +TestRail CLI v1.12.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli [OPTIONS] COMMAND [ARGS]... @@ -81,6 +81,7 @@ Options: Commands: add_run Add a new test run in TestRail + labels Label management facility for Projects, Test Run and Test cases parse_junit Parse JUnit report and upload results to TestRail parse_openapi Parse OpenAPI spec and create cases in TestRail parse_robot Parse Robot Framework report and upload results to TestRail @@ -284,10 +285,14 @@ tests are run across parallel, independent test nodes, all nodes should report t First, use the `add_run` command to create a new run; then, pass the run title and id to each of the test nodes, which will be used to upload all results into the same test run. +#### Labels Management + +Manage labels for **Projects**, **Test Cases**, and **Test Runs** using the `labels` command. Labels help categorize and organize your test management assets efficiently. 
+ ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.11.0 +TestRail CLI v1.12.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -329,7 +334,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.11.0 +TestRail CLI v1.12.0 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli parse_openapi [OPTIONS] diff --git a/trcli/__init__.py b/trcli/__init__.py index f84c53b..b518f6e 100644 --- a/trcli/__init__.py +++ b/trcli/__init__.py @@ -1 +1 @@ -__version__ = "1.11.0" +__version__ = "1.12.0" From e723052d0898da6a501972c6855eddf487cd51bb Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 7 Aug 2025 16:29:40 +0800 Subject: [PATCH 02/36] TRCLI-78 Implement labels management command, added unit test and test data --- tests/test_api_client.py | 148 ++++++++++ tests/test_api_request_handler_labels.py | 344 ++++++++++++++++++++++ tests/test_cmd_labels.py | 348 +++++++++++++++++++++++ tests/test_data/cli_test_data.py | 3 +- trcli/api/api_client.py | 53 +++- trcli/api/api_request_handler.py | 82 ++++++ trcli/commands/cmd_labels.py | 225 +++++++++++++++ trcli/constants.py | 4 +- 8 files changed, 1191 insertions(+), 16 deletions(-) create mode 100644 tests/test_api_request_handler_labels.py create mode 100644 tests/test_cmd_labels.py create mode 100644 trcli/commands/cmd_labels.py diff --git a/tests/test_api_client.py b/tests/test_api_client.py index 3f4e0bf..49e95a4 100644 --- a/tests/test_api_client.py +++ b/tests/test_api_client.py @@ -1,4 +1,5 @@ import pytest +from unittest.mock import patch, MagicMock from trcli.constants import FAULT_MAPPING from trcli.cli import Environment from trcli.api.api_client import APIClient @@ -280,3 +281,150 @@ def test_timeout_is_parsed_and_validated( else: with pytest.raises(AssertionError): environment.log.assert_has_calls([mocker.call(TIMEOUT_PARSE_ERROR)]) + + @pytest.mark.api_client + @patch('requests.post') + def test_send_post_with_json_default(self, mock_post, api_resources_maker): + """Test that send_post uses JSON by default""" + # Mock response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = {"id": 1, "title": "Test"} + mock_response.content = b'{"id": 1, "title": "Test"}' + mock_post.return_value = mock_response + + # Create API client + api_client = api_resources_maker() + + # Call send_post without as_form_data parameter (should default to JSON) + result = api_client.send_post("test_endpoint", {":title": "Test Label"}) + + # Verify the result + assert result.status_code == 200 + assert result.response_text == {"id": 1, "title": "Test"} + + # Verify JSON was used + mock_post.assert_called_once() + call_args = mock_post.call_args + + # Should use json parameter, not data + assert 'json' in call_args[1] + assert 'data' not in call_args[1] + assert call_args[1]['json'] == {":title": "Test Label"} + + # Should have JSON content type header + headers = call_args[1]['headers'] + assert headers.get('Content-Type') == 'application/json' + + @pytest.mark.api_client + @patch('requests.post') + def test_send_post_with_form_data_true(self, mock_post, api_resources_maker): + """Test that send_post uses form-data when as_form_data=True""" + # Mock response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = {"id": 1, "title": "Test"} + mock_response.content = b'{"id": 1, "title": "Test"}' + 
mock_post.return_value = mock_response + + # Create API client + api_client = api_resources_maker() + + # Call send_post with as_form_data=True + result = api_client.send_post("test_endpoint", {":title": "Test Label"}, as_form_data=True) + + # Verify the result + assert result.status_code == 200 + assert result.response_text == {"id": 1, "title": "Test"} + + # Verify form-data was used + mock_post.assert_called_once() + call_args = mock_post.call_args + + # Should use data parameter, not json + assert 'data' in call_args[1] + assert 'json' not in call_args[1] + assert call_args[1]['data'] == {":title": "Test Label"} + + # Should NOT have files parameter (uses application/x-www-form-urlencoded) + assert 'files' not in call_args[1] or call_args[1]['files'] is None + + # Should NOT have JSON content type header when using form-data + headers = call_args[1]['headers'] + assert headers.get('Content-Type') != 'application/json' + + @pytest.mark.api_client + @patch('requests.post') + def test_send_post_with_form_data_false(self, mock_post, api_resources_maker): + """Test that send_post uses JSON when as_form_data=False explicitly""" + # Mock response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = {"id": 1, "title": "Test"} + mock_response.content = b'{"id": 1, "title": "Test"}' + mock_post.return_value = mock_response + + # Create API client + api_client = api_resources_maker() + + # Call send_post with as_form_data=False + result = api_client.send_post("test_endpoint", {":title": "Test Label"}, as_form_data=False) + + # Verify the result + assert result.status_code == 200 + assert result.response_text == {"id": 1, "title": "Test"} + + # Verify JSON was used + mock_post.assert_called_once() + call_args = mock_post.call_args + + # Should use json parameter, not data + assert 'json' in call_args[1] + assert 'data' not in call_args[1] + assert call_args[1]['json'] == {":title": "Test Label"} + + # Should have JSON content type header + headers = call_args[1]['headers'] + assert headers.get('Content-Type') == 'application/json' + + @pytest.mark.api_client + @patch('requests.post') + def test_send_post_with_files_and_form_data(self, mock_post, api_resources_maker): + """Test that send_post handles files parameter with form-data""" + # Mock response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = {"id": 1, "title": "Test"} + mock_response.content = b'{"id": 1, "title": "Test"}' + mock_post.return_value = mock_response + + # Create API client + api_client = api_resources_maker() + + # Call send_post with files and form-data + files = {"file1": "/path/to/file"} + result = api_client.send_post( + "test_endpoint", + {":title": "Test Label"}, + files=files, + as_form_data=True + ) + + # Verify the result + assert result.status_code == 200 + assert result.response_text == {"id": 1, "title": "Test"} + + # Verify form-data was used + mock_post.assert_called_once() + call_args = mock_post.call_args + + # Should use data parameter, not json + assert 'data' in call_args[1] + assert 'json' not in call_args[1] + assert call_args[1]['data'] == {":title": "Test Label"} + # Files should be passed through as provided (not replaced with empty dict) + assert call_args[1]['files'] == files + + # Should NOT have JSON content type header when using files + headers = call_args[1]['headers'] + assert headers.get('Content-Type') != 'application/json' diff --git a/tests/test_api_request_handler_labels.py 
b/tests/test_api_request_handler_labels.py new file mode 100644 index 0000000..98e20ae --- /dev/null +++ b/tests/test_api_request_handler_labels.py @@ -0,0 +1,344 @@ +import pytest +from unittest.mock import Mock, patch, MagicMock +from pathlib import Path +import json +from serde.json import from_json + +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.api.api_client import APIClient, APIClientResult +from trcli.cli import Environment +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.data_classes.data_parsers import MatchersParser +from tests.helpers.api_client_helpers import TEST_RAIL_URL + + +@pytest.fixture(scope="function") +def labels_handler(): + """Create an ApiRequestHandler instance for testing label methods""" + api_client = APIClient(host_name=TEST_RAIL_URL) + environment = Environment() + environment.project = "Test Project" + environment.batch_size = 10 + environment.case_matcher = MatchersParser.AUTO + + + # Load test data + json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" + with open(json_path) as file_json: + json_string = json.dumps(json.load(file_json)) + test_input = from_json(TestRailSuite, json_string) + + api_request = ApiRequestHandler(environment, api_client, test_input, verify=False) + return api_request + + +class TestApiRequestHandlerLabels: + """Test class for label management API methods""" + + def test_add_label_success(self, labels_handler): + """Test successful label addition""" + # Mock the API client response + mock_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test Label"}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + result, error = labels_handler.add_label(project_id=1, title="Test Label") + + assert error is None + assert result["id"] == 1 + assert result["title"] == "Test Label" + + # Verify the API call was made with correct parameters + labels_handler.client.send_post.assert_called_once_with( + "add_label/1", + payload=None, + files={'title': (None, "Test Label")} + ) + + def test_add_label_api_error(self, labels_handler): + """Test label addition with API error""" + mock_response = APIClientResult( + status_code=400, + response_text=None, + error_message="Label title already exists" + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + result, error = labels_handler.add_label(project_id=1, title="Duplicate Label") + + assert error == "Label title already exists" + assert result is None + + def test_add_label_multipart_format(self, labels_handler): + """Test label addition uses multipart/form-data format""" + mock_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test Label"}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + result, error = labels_handler.add_label(project_id=1, title="Test Label") + + assert error is None + # Verify multipart/form-data format is used + call_args = labels_handler.client.send_post.call_args + assert call_args[1]['payload'] is None + assert call_args[1]['files'] == {'title': (None, "Test Label")} + + def test_update_label_success(self, labels_handler): + """Test successful label update""" + mock_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Updated Label"}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_post', 
return_value=mock_response): + result, error = labels_handler.update_label( + label_id=1, project_id=1, title="Updated Label" + ) + + assert error is None + assert result["id"] == 1 + assert result["title"] == "Updated Label" + + # Verify the API call was made with correct parameters + labels_handler.client.send_post.assert_called_once_with( + "update_label/1", + payload=None, + files={'project_id': (None, '1'), 'title': (None, "Updated Label")} + ) + + def test_update_label_api_error(self, labels_handler): + """Test label update with API error""" + mock_response = APIClientResult( + status_code=403, + response_text=None, + error_message="No access to the project" + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + result, error = labels_handler.update_label( + label_id=1, project_id=1, title="Updated Label" + ) + + assert error == "No access to the project" + assert result is None + + def test_get_label_success(self, labels_handler): + """Test successful single label retrieval""" + mock_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Label", + "created_by": "2", + "created_on": "1234567890" + }, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + result, error = labels_handler.get_label(label_id=1) + + assert error is None + assert result["id"] == 1 + assert result["title"] == "Test Label" + assert result["created_by"] == "2" + + # Verify the API call was made with correct parameters + labels_handler.client.send_get.assert_called_once_with("get_label/1") + + def test_get_label_not_found(self, labels_handler): + """Test single label retrieval when label not found""" + mock_response = APIClientResult( + status_code=400, + response_text=None, + error_message="Label not found" + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + result, error = labels_handler.get_label(label_id=999) + + assert error == "Label not found" + assert result is None + + def test_get_labels_success(self, labels_handler): + """Test successful labels listing""" + mock_response = APIClientResult( + status_code=200, + response_text={ + "offset": 0, + "limit": 250, + "size": 2, + "_links": {"next": None, "prev": None}, + "labels": [ + {"id": 1, "title": "Label 1", "created_by": "2", "created_on": "1234567890"}, + {"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"} + ] + }, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + result, error = labels_handler.get_labels(project_id=1) + + assert error is None + assert result["size"] == 2 + assert len(result["labels"]) == 2 + assert result["labels"][0]["id"] == 1 + assert result["labels"][1]["id"] == 2 + + # Verify the API call was made with correct parameters + labels_handler.client.send_get.assert_called_once_with("get_labels/1") + + def test_get_labels_with_pagination(self, labels_handler): + """Test labels listing with custom pagination parameters""" + mock_response = APIClientResult( + status_code=200, + response_text={ + "offset": 10, + "limit": 5, + "size": 0, + "_links": {"next": None, "prev": None}, + "labels": [] + }, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + result, error = labels_handler.get_labels(project_id=1, offset=10, limit=5) + + assert error is None + assert result["offset"] == 10 + assert result["limit"] == 5 + assert 
len(result["labels"]) == 0 + + # Verify the API call was made with pagination parameters + labels_handler.client.send_get.assert_called_once_with("get_labels/1&offset=10&limit=5") + + def test_get_labels_with_default_pagination(self, labels_handler): + """Test labels listing with default pagination (should not add parameters)""" + mock_response = APIClientResult( + status_code=200, + response_text={ + "offset": 0, + "limit": 250, + "size": 1, + "labels": [{"id": 1, "title": "Label 1"}] + }, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + result, error = labels_handler.get_labels(project_id=1, offset=0, limit=250) + + assert error is None + # Should call without pagination parameters since they're defaults + labels_handler.client.send_get.assert_called_once_with("get_labels/1") + + def test_get_labels_api_error(self, labels_handler): + """Test labels listing with API error""" + mock_response = APIClientResult( + status_code=403, + response_text=None, + error_message="No access to the project" + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + result, error = labels_handler.get_labels(project_id=1) + + assert error == "No access to the project" + assert result is None + + def test_delete_labels_success(self, labels_handler): + """Test successful label deletion""" + mock_response = APIClientResult( + status_code=200, + response_text="Success", + error_message=None + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + success, error = labels_handler.delete_labels(label_ids=[1, 2, 3]) + + assert success is True + assert error is None + + # Verify the API call was made with correct parameters + labels_handler.client.send_post.assert_called_once_with( + "delete_labels", + payload=None, + files={"label_id": (None, "1,2,3")} + ) + + def test_delete_label_single_id(self, labels_handler): + """Test single label deletion""" + mock_response = APIClientResult( + status_code=200, + response_text="Success", + error_message=None + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + success, error = labels_handler.delete_label(label_id=1) + + assert success is True + assert error is None + + labels_handler.client.send_post.assert_called_once_with( + "delete_label/1", + payload=None + ) + + def test_delete_labels_batch(self, labels_handler): + """Test batch label deletion with multiple IDs""" + mock_response = APIClientResult( + status_code=200, + response_text="Success", + error_message=None + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + success, error = labels_handler.delete_labels(label_ids=[1, 2, 3]) + + assert success is True + assert error is None + + labels_handler.client.send_post.assert_called_once_with( + "delete_labels", + payload=None, + files={"label_id": (None, "1,2,3")} + ) + + def test_delete_labels_api_error(self, labels_handler): + """Test label deletion with API error""" + mock_response = APIClientResult( + status_code=400, + response_text=None, + error_message="One or more labels not found" + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + success, error = labels_handler.delete_labels(label_ids=[999, 1000]) + + assert success is False + assert error == "One or more labels not found" + + def test_delete_labels_forbidden(self, labels_handler): + """Test label deletion with forbidden access""" + mock_response = 
APIClientResult( + status_code=403, + response_text=None, + error_message="No access to the project" + ) + + with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + success, error = labels_handler.delete_labels(label_ids=[1]) + + assert success is False + assert error == "No access to the project" \ No newline at end of file diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py new file mode 100644 index 0000000..18a1a0e --- /dev/null +++ b/tests/test_cmd_labels.py @@ -0,0 +1,348 @@ +import pytest +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner + +from trcli.cli import Environment +from trcli.commands import cmd_labels +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.api.project_based_client import ProjectBasedClient + + +class TestCmdLabels: + """Test class for labels command functionality""" + + def setup_method(self): + """Set up test environment""" + self.runner = CliRunner() + self.environment = Environment(cmd="labels") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_success(self, mock_project_client): + """Test successful label addition""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_label.return_value = ( + {"label": {"id": 1, "title": "Test Label"}}, None + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.add, ['--title', 'Test Label'], + obj=self.environment) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_label.assert_called_once_with( + project_id=1, title='Test Label' + ) + mock_log.assert_any_call("Successfully added label: ID=1, Title='Test Label'") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_title_too_long(self, mock_project_client): + """Test label addition with title too long""" + long_title = "a" * 21 # 21 characters, exceeds 20 char limit + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.add, ['--title', long_title], + obj=self.environment) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Label title must be 20 characters or less.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_api_error(self, mock_project_client): + """Test label addition with API error""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_label.return_value = ( + None, "API Error: Label already exists" + ) + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + 
result = self.runner.invoke(cmd_labels.add, ['--title', 'Test Label'], + obj=self.environment) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Failed to add label: API Error: Label already exists") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_update_label_success(self, mock_project_client): + """Test successful label update""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.update_label.return_value = ( + {"id": 1, "title": "Updated Label"}, None + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.update, ['--id', '1', '--title', 'Updated Label'], + obj=self.environment) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.update_label.assert_called_once_with( + label_id=1, project_id=1, title='Updated Label' + ) + mock_log.assert_any_call("Successfully updated label: ID=1, Title='Updated Label'") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_update_label_title_too_long(self, mock_project_client): + """Test label update with title too long""" + long_title = "a" * 21 # 21 characters, exceeds 20 char limit + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.update, ['--id', '1', '--title', long_title], + obj=self.environment) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Label title must be 20 characters or less.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_delete_labels_success(self, mock_project_client): + """Test successful label deletion""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.api_request_handler.delete_labels.return_value = (True, None) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + # Use input to automatically confirm deletion + result = self.runner.invoke(cmd_labels.delete, ['--ids', '1,2,3'], + obj=self.environment, input='y\n') + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.delete_labels.assert_called_once_with([1, 2, 3]) + mock_log.assert_any_call("Successfully deleted 3 label(s)") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_delete_single_label_success(self, mock_project_client): + """Test successful single label deletion""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.api_request_handler.delete_label.return_value = (True, None) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + # Use input to automatically confirm deletion + result = self.runner.invoke(cmd_labels.delete, ['--ids', '1'], + obj=self.environment, input='y\n') + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.delete_label.assert_called_once_with(1) + 
mock_log.assert_any_call("Successfully deleted 1 label(s)") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_delete_labels_invalid_format(self, mock_project_client): + """Test label deletion with invalid ID format""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.delete, ['--ids', '1,abc,3'], + obj=self.environment, input='y\n') + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_delete_single_label_api_error(self, mock_project_client): + """Test single label deletion with API error""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + # Mock the single delete method since we're testing with a single ID + mock_client_instance.api_request_handler.delete_label.return_value = ( + False, "API Error: Label not found" + ) + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.delete, ['--ids', '1'], + obj=self.environment, input='y\n') + + assert result.exit_code == 1 + mock_elog.assert_called_with("Failed to delete labels: API Error: Label not found") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_delete_batch_labels_api_error(self, mock_project_client): + """Test batch label deletion with API error""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + # Mock the batch delete method since we're testing with multiple IDs + mock_client_instance.api_request_handler.delete_labels.return_value = ( + False, "API Error: Insufficient permissions" + ) + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.delete, ['--ids', '1,2,3'], + obj=self.environment, input='y\n') + + assert result.exit_code == 1 + mock_elog.assert_called_with("Failed to delete labels: API Error: Insufficient permissions") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_labels_success(self, mock_project_client): + """Test successful labels listing""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + + labels_response = { + "offset": 0, + "limit": 250, + "size": 2, + "labels": [ + {"id": 1, "title": "Label 1", "created_by": "2"}, + {"id": 2, "title": "Label 2", "created_by": "3"} + ] + } + mock_client_instance.api_request_handler.get_labels.return_value = (labels_response, None) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.list, [], obj=self.environment) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_labels.assert_called_once_with( + project_id=1, offset=0, limit=250 + ) + mock_log.assert_any_call("Found 2 labels (showing 1-2 of 2):") + mock_log.assert_any_call(" ID: 1, Title: 
'Label 1', Created by: 2") + mock_log.assert_any_call(" ID: 2, Title: 'Label 2', Created by: 3") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_labels_with_pagination(self, mock_project_client): + """Test labels listing with custom pagination""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + + labels_response = {"offset": 10, "limit": 5, "size": 1, "labels": []} + mock_client_instance.api_request_handler.get_labels.return_value = (labels_response, None) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.list, ['--offset', '10', '--limit', '5'], + obj=self.environment) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_labels.assert_called_once_with( + project_id=1, offset=10, limit=5 + ) + mock_log.assert_any_call(" No labels found.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_get_label_success(self, mock_project_client): + """Test successful single label retrieval""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + + label_response = { + "id": 1, + "title": "Test Label", + "created_by": "2", + "created_on": "1234567890" + } + mock_client_instance.api_request_handler.get_label.return_value = (label_response, None) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.get, ['--id', '1'], obj=self.environment) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_label.assert_called_once_with(1) + mock_log.assert_any_call(" ID: 1") + mock_log.assert_any_call(" Title: 'Test Label'") + mock_log.assert_any_call(" Created by: 2") + mock_log.assert_any_call(" Created on: 1234567890") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_get_label_with_name_field(self, mock_project_client): + """Test single label retrieval with 'name' field instead of 'title'""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + + # Some responses might use 'name' instead of 'title' + label_response = { + "id": 1, + "name": "Test Label", + "created_by": "2", + "created_on": "1234567890" + } + mock_client_instance.api_request_handler.get_label.return_value = (label_response, None) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.get, ['--id', '1'], obj=self.environment) + + assert result.exit_code == 0 + mock_log.assert_any_call(" Title: 'Test Label'") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_get_label_api_error(self, mock_project_client): + """Test single label retrieval with API error""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.api_request_handler.get_label.return_value = ( + None, "API Error: Label not found" + ) + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 
'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.get, ['--id', '999'], obj=self.environment) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Failed to retrieve label: API Error: Label not found") + + def test_print_config(self): + """Test the print_config function""" + with patch.object(self.environment, 'log') as mock_log: + cmd_labels.print_config(self.environment, "Test") + + expected_message = ( + "Labels Test Execution Parameters" + "\n> TestRail instance: https://test.testrail.com (user: test@example.com)" + "\n> Project: Test Project" + ) + mock_log.assert_called_once_with(expected_message) \ No newline at end of file diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index 79bb242..9881239 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -66,6 +66,7 @@ ' - parse_junit: JUnit XML Files (& Similar)\n' ' - parse_robot: Robot Framework XML Files\n' ' - parse_openapi: OpenAPI YML Files\n' - ' - add_run: Create a new test run\n') + ' - add_run: Create a new test run\n' + ' - labels: Manage labels (add, update, delete, list)\n') trcli_help_description = "TestRail CLI" diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index 9beecf6..3add022 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -80,7 +80,7 @@ def send_get(self, uri: str) -> APIClientResult: """ return self.__send_request("GET", uri, None) - def send_post(self, uri: str, payload: dict = None, files: Dict[str, Path] = None) -> APIClientResult: + def send_post(self, uri: str, payload: dict = None, files: Dict[str, Path] = None, as_form_data: bool = False) -> APIClientResult: """ Sends POST request to host specified by host_name. Handles retries taking into consideration retries parameter. 
Retry will occur when one of the following happens: @@ -88,9 +88,9 @@ def send_post(self, uri: str, payload: dict = None, files: Dict[str, Path] = Non * timeout occurred * connection error occurred """ - return self.__send_request("POST", uri, payload, files) + return self.__send_request("POST", uri, payload, files, as_form_data) - def __send_request(self, method: str, uri: str, payload: dict, files: Dict[str, Path] = None) -> APIClientResult: + def __send_request(self, method: str, uri: str, payload: dict, files: Dict[str, Path] = None, as_form_data: bool = False) -> APIClientResult: status_code = -1 response_text = "" error_message = "" @@ -99,7 +99,7 @@ def __send_request(self, method: str, uri: str, payload: dict, files: Dict[str, auth = HTTPBasicAuth(username=self.username, password=password) headers = {"User-Agent": self.USER_AGENT} headers.update(self.__get_proxy_headers()) - if files is None: + if files is None and not as_form_data: headers["Content-Type"] = "application/json" verbose_log_message = "" proxies = self._get_proxies_for_request(url) @@ -110,16 +110,41 @@ def __send_request(self, method: str, uri: str, payload: dict, files: Dict[str, method=method, url=url, payload=payload ) if method == "POST": - response = requests.post( - url=url, - auth=auth, - json=payload, - timeout=self.timeout, - headers=headers, - verify=self.verify, - files=files, - proxies=proxies - ) + if as_form_data: + # Send as application/x-www-form-urlencoded (like curl --form) + request_kwargs = { + 'url': url, + 'auth': auth, + 'data': payload, + 'timeout': self.timeout, + 'headers': headers, + 'verify': self.verify, + 'proxies': proxies + } + if files: + request_kwargs['files'] = files + response = requests.post(**request_kwargs) + else: + # Handle different request types + request_kwargs = { + 'url': url, + 'auth': auth, + 'timeout': self.timeout, + 'headers': headers, + 'verify': self.verify, + 'proxies': proxies + } + + if files: + # When files are provided, send as multipart/form-data + request_kwargs['files'] = files + if payload: + request_kwargs['data'] = payload + else: + # When no files, send as JSON + request_kwargs['json'] = payload + + response = requests.post(**request_kwargs) else: response = requests.get( url=url, diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index b5623df..60a8907 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -733,3 +733,85 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ return entities, response.error_message else: return [], response.error_message + + # Label management methods + def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: + """ + Add a new label to the project + :param project_id: ID of the project + :param title: Title of the label (max 20 characters) + :returns: Tuple with created label data and error string + """ + # Use multipart/form-data like the working CURL command + files = {'title': (None, title)} + response = self.client.send_post(f"add_label/{project_id}", payload=None, files=files) + return response.response_text, response.error_message + + def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: + """ + Update an existing label + :param label_id: ID of the label to update + :param project_id: ID of the project + :param title: New title for the label (max 20 characters) + :returns: Tuple with updated label data and error string + """ + # Use multipart/form-data like add_label + files = 
{ + 'project_id': (None, str(project_id)), + 'title': (None, title) # Field name is 'title' (no colon) for form data + } + response = self.client.send_post(f"update_label/{label_id}", payload=None, files=files) + return response.response_text, response.error_message + + def get_label(self, label_id: int) -> Tuple[dict, str]: + """ + Get a specific label by ID + :param label_id: ID of the label to retrieve + :returns: Tuple with label data and error string + """ + response = self.client.send_get(f"get_label/{label_id}") + return response.response_text, response.error_message + + def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: + """ + Get all labels for a project with pagination + :param project_id: ID of the project + :param offset: Offset for pagination + :param limit: Limit for pagination + :returns: Tuple with labels data (including pagination info) and error string + """ + params = [] + if offset > 0: + params.append(f"offset={offset}") + if limit != 250: + params.append(f"limit={limit}") + + url = f"get_labels/{project_id}" + if params: + url += "&" + "&".join(params) + + response = self.client.send_get(url) + return response.response_text, response.error_message + + def delete_label(self, label_id: int) -> Tuple[bool, str]: + """ + Delete a single label + :param label_id: ID of the label to delete + :returns: Tuple with success status and error string + """ + response = self.client.send_post(f"delete_label/{label_id}", payload=None) + success = response.status_code == 200 + return success, response.error_message + + def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: + """ + Delete multiple labels + :param label_ids: List of label IDs to delete + :returns: Tuple with success status and error string + """ + # Send as form data with correct parameter name + label_ids_str = ",".join(map(str, label_ids)) + files = {"label_id": (None, label_ids_str)} # Note: parameter is 'label_id' not 'label_ids' + response = self.client.send_post("delete_labels", payload=None, files=files) + success = response.status_code == 200 + return success, response.error_message diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py new file mode 100644 index 0000000..81a1a9c --- /dev/null +++ b/trcli/commands/cmd_labels.py @@ -0,0 +1,225 @@ +import click + +from trcli.api.project_based_client import ProjectBasedClient +from trcli.cli import pass_environment, CONTEXT_SETTINGS, Environment +from trcli.data_classes.dataclass_testrail import TestRailSuite + + +def print_config(env: Environment, action: str): + env.log(f"Labels {action} Execution Parameters" + f"\n> TestRail instance: {env.host} (user: {env.username})" + f"\n> Project: {env.project if env.project else env.project_id}") + + +@click.group(context_settings=CONTEXT_SETTINGS) +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, *args, **kwargs): + """Manage labels in TestRail""" + environment.cmd = "labels" + environment.set_parameters(context) + + +@cli.command() +@click.option("--title", required=True, metavar="", help="Title of the label to add (max 20 characters).") +@click.pass_context +@pass_environment +def add(environment: Environment, context: click.Context, title: str, *args, **kwargs): + """Add a new label in TestRail""" + environment.check_for_required_parameters() + print_config(environment, "Add") + + if len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + project_client 
= ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Adding label '{title}'...") + + label_data, error_message = project_client.api_request_handler.add_label( + project_id=project_client.project.project_id, + title=title + ) + + if error_message: + environment.elog(f"Failed to add label: {error_message}") + exit(1) + else: + # API response has nested structure: {'label': {'id': 5, 'title': 'hello', ...}} + label_info = label_data.get('label', label_data) # Handle both nested and flat responses + environment.log(f"Successfully added label: ID={label_info['id']}, Title='{label_info['title']}'") + + +@cli.command() +@click.option("--id", "label_id", required=True, type=int, metavar="", help="ID of the label to update.") +@click.option("--title", required=True, metavar="", help="New title for the label (max 20 characters).") +@click.pass_context +@pass_environment +def update(environment: Environment, context: click.Context, label_id: int, title: str, *args, **kwargs): + """Update an existing label in TestRail""" + environment.check_for_required_parameters() + print_config(environment, "Update") + + if len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Updating label with ID {label_id}...") + + label_data, error_message = project_client.api_request_handler.update_label( + label_id=label_id, + project_id=project_client.project.project_id, + title=title + ) + + if error_message: + environment.elog(f"Failed to update label: {error_message}") + exit(1) + else: + # Handle both nested and flat API responses + label_info = label_data.get('label', label_data) + environment.log(f"Successfully updated label: ID={label_info['id']}, Title='{label_info['title']}'") + + +@cli.command() +@click.option("--ids", required=True, metavar="", help="Comma-separated list of label IDs to delete (e.g., 1,2,3).") +@click.confirmation_option(prompt="Are you sure you want to delete these labels?") +@click.pass_context +@pass_environment +def delete(environment: Environment, context: click.Context, ids: str, *args, **kwargs): + """Delete labels from TestRail""" + environment.check_for_required_parameters() + print_config(environment, "Delete") + + try: + label_ids = [int(id.strip()) for id in ids.split(",")] + except ValueError: + environment.elog("Error: Invalid label IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Deleting labels with IDs: {', '.join(map(str, label_ids))}...") + + # Use single delete endpoint for one label, batch endpoint for multiple + if len(label_ids) == 1: + success, error_message = project_client.api_request_handler.delete_label(label_ids[0]) + else: + success, error_message = project_client.api_request_handler.delete_labels(label_ids) + + if error_message: + environment.elog(f"Failed to delete labels: {error_message}") + exit(1) + else: + environment.log(f"Successfully deleted {len(label_ids)} label(s)") + + +@cli.command() +@click.option("--offset", type=int, default=0, metavar="", help="Offset for pagination (default: 0).") +@click.option("--limit", type=int, default=250, metavar="", help="Limit for pagination (default: 250).") +@click.pass_context +@pass_environment +def list(environment: Environment, context: click.Context, offset: int, limit: int, *args, **kwargs): + """List all labels in the project""" + environment.check_for_required_parameters() + print_config(environment, "List") + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log("Retrieving labels...") + + labels_data, error_message = project_client.api_request_handler.get_labels( + project_id=project_client.project.project_id, + offset=offset, + limit=limit + ) + + if error_message: + environment.elog(f"Failed to retrieve labels: {error_message}") + exit(1) + else: + labels = labels_data.get('labels', []) + total_size = labels_data.get('size', len(labels)) + + environment.log(f"Found {len(labels)} labels (showing {offset + 1}-{offset + len(labels)} of {total_size}):") + environment.log("") + + if labels: + for label in labels: + environment.log(f" ID: {label['id']}, Title: '{label['title']}', Created by: {label.get('created_by', 'N/A')}") + else: + environment.log(" No labels found.") + + +@cli.command() +@click.option("--id", "label_id", required=True, type=int, metavar="", help="ID of the label to retrieve.") +@click.pass_context +@pass_environment +def get(environment: Environment, context: click.Context, label_id: int, *args, **kwargs): + """Get a specific label by ID""" + environment.check_for_required_parameters() + print_config(environment, "Get") + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Retrieving label with ID {label_id}...") + + label_data, error_message = project_client.api_request_handler.get_label(label_id) + + # Debug: Log what we received + if environment.verbose: + environment.log(f"Debug: API response: {label_data}") + environment.log(f"Debug: Error message: {error_message}") + + if error_message: + environment.elog(f"Failed to retrieve label: {error_message}") + exit(1) + elif not label_data: + environment.elog(f"No data received for label ID {label_id}") + exit(1) + else: + environment.log(f"Label details:") + + # Handle different possible response structures + if isinstance(label_data, dict): + # Check if it's a nested response like add_label + if 'label' in label_data: + label_info = label_data['label'] + 
else: + label_info = label_data + + # Ensure we have the basic required fields + if not label_info or not isinstance(label_info, dict): + environment.elog(f"Invalid label data received: {label_info}") + exit(1) + + environment.log(f" ID: {label_info.get('id', label_id)}") # Fallback to requested ID + environment.log(f" Title: '{label_info.get('title', label_info.get('name', 'N/A'))}'") + environment.log(f" Created by: {label_info.get('created_by', 'N/A')}") + environment.log(f" Created on: {label_info.get('created_on', 'N/A')}") + else: + environment.elog(f"Unexpected response format: {label_data}") + exit(1) \ No newline at end of file diff --git a/trcli/constants.py b/trcli/constants.py index 70b9604..dbe346e 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -71,6 +71,7 @@ parse_junit=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), parse_openapi=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), parse_robot=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), + labels=dict(**FAULT_MAPPING), ) PROMPT_MESSAGES = dict( @@ -91,7 +92,8 @@ - parse_junit: JUnit XML Files (& Similar) - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - - add_run: Create a new test run""" + - add_run: Create a new test run + - labels: Manage labels (add, update, delete, list)""" MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. \nError: Missing command.""" From b05aa13e1d5956a35eb0ecf4e29ecee0a5f70ab6 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 7 Aug 2025 16:30:24 +0800 Subject: [PATCH 03/36] TRCLI-78 Added functional tests --- tests_e2e/test_end2end.py | 429 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 429 insertions(+) diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 8d18e4f..ebf3311 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -34,6 +34,31 @@ def _assert_contains(text: str, expected_text_list: list): assert expected in text, f"Expected to find {expected} in: \n{text}" +def _run_cmd_allow_failure(multiline_cmd: str): + """Run command and return output and return code (allows non-zero exit codes)""" + lines_list = [] + for line in multiline_cmd.splitlines(): + line = line.strip() + if not line: + continue + if line.endswith("\\"): + lines_list.append(line.rstrip("\\").strip()) + else: + lines_list.append(f"{line} &&") + lines_list.append("echo DONE") + single_line_cmd = " ".join(lines_list) + print("") + print(f"Executing: {single_line_cmd}") + process = subprocess.Popen(single_line_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + with process.stdout: + output = "" + for line in iter(process.stdout.readline, b''): + output += line.decode() + print(output) + process.wait() + return output, process.returncode + + class TestsEndToEnd: # TestRail 101 instance has the required configuration for this test run @@ -501,4 +526,408 @@ def bug_test_automation_id(self): "Submitted 6 test results" ] ) + + def test_labels_full_workflow(self): + """Test complete labels workflow: add, list, get, update, delete""" + + # Step 1: Add a new label + add_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "E2E-Test-Label" + """) + _assert_contains( + add_output, + [ + "Adding label 'E2E-Test-Label'...", + "Successfully added label: ID=", + "Title='E2E-Test-Label'" + ] + ) + + # 
Extract label ID from the add output + import re + label_id_match = re.search(r"ID=(\d+)", add_output) + assert label_id_match, f"Could not find label ID in output: {add_output}" + label_id = label_id_match.group(1) + print(f"Created label with ID: {label_id}") + + # Step 2: List labels to verify it exists + list_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels list + """) + _assert_contains( + list_output, + [ + "Retrieving labels...", + "Found", + f"ID: {label_id}, Title: 'E2E-Test-Label'" + ] + ) + + # Step 3: Get the specific label + get_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels get \\ + --id {label_id} + """) + _assert_contains( + get_output, + [ + f"Retrieving label with ID {label_id}...", + "Label details:", + f"ID: {label_id}", + "Title: 'E2E-Test-Label'" + ] + ) + + # Step 4: Update the label + update_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels update \\ + --id {label_id} \\ + --title "Updated-E2E-Label" + """) + _assert_contains( + update_output, + [ + f"Updating label with ID {label_id}...", + f"Successfully updated label: ID={label_id}", + "Title='Updated-E2E-Label'" + ] + ) + + # Step 5: Verify the update by getting the label again + get_updated_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels get \\ + --id {label_id} + """) + _assert_contains( + get_updated_output, + [ + f"ID: {label_id}", + "Title: 'Updated-E2E-Label'" + ] + ) + + # Step 6: Delete the label (with confirmation) + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) + _assert_contains( + delete_output, + [ + f"Deleting labels with IDs: {label_id}...", + "Successfully deleted 1 label(s)" + ] + ) + + def test_labels_add_multiple_and_delete_multiple(self): + """Test adding multiple labels and deleting them in batch""" + + # Add first label + add_output1 = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "Batch-Test-1" + """) + + # Add second label + add_output2 = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "Batch-Test-2" + """) + + # Add third label + add_output3 = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "Batch-Test-3" + """) + + # Extract all label IDs + import re + label_id1 = re.search(r"ID=(\d+)", add_output1).group(1) + label_id2 = re.search(r"ID=(\d+)", add_output2).group(1) + label_id3 = re.search(r"ID=(\d+)", add_output3).group(1) + + label_ids = f"{label_id1},{label_id2},{label_id3}" + print(f"Created labels with IDs: {label_ids}") + + # Verify all labels exist in list + list_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels list + """) + _assert_contains( + list_output, + [ + f"ID: {label_id1}, Title: 'Batch-Test-1'", + f"ID: {label_id2}, Title: 'Batch-Test-2'", + f"ID: {label_id3}, Title: 'Batch-Test-3'" + ] + ) + + # Delete all labels in batch + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ 
+ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_ids} + """) + _assert_contains( + delete_output, + [ + f"Deleting labels with IDs: {label_ids}...", + "Successfully deleted 3 label(s)" + ] + ) + + def test_labels_help_commands(self): + """Test labels help functionality""" + + # Test main labels help + main_help_output = _run_cmd(f""" +trcli labels --help + """) + _assert_contains( + main_help_output, + [ + "Manage labels in TestRail", + "add Add a new label in TestRail", + "delete Delete labels from TestRail", + "get Get a specific label by ID", + "list List all labels in the project", + "update Update an existing label in TestRail" + ] + ) + + # Test add command help + add_help_output = _run_cmd(f""" +trcli labels add --help + """) + _assert_contains( + add_help_output, + [ + "Add a new label in TestRail", + "--title", + "Title of the label to add (max 20 characters)" + ] + ) + + # Test update command help + update_help_output = _run_cmd(f""" +trcli labels update --help + """) + _assert_contains( + update_help_output, + [ + "Update an existing label in TestRail", + "--id", + "--title", + "ID of the label to update", + "New title for the label (max 20 characters)" + ] + ) + + # Test delete command help + delete_help_output = _run_cmd(f""" +trcli labels delete --help + """) + _assert_contains( + delete_help_output, + [ + "Delete labels from TestRail", + "--ids", + "Comma-separated list of label IDs to delete" + ] + ) + + # Test list command help + list_help_output = _run_cmd(f""" +trcli labels list --help + """) + _assert_contains( + list_help_output, + [ + "List all labels in the project", + "--offset", + "--limit", + "Offset for pagination", + "Limit for pagination" + ] + ) + + # Test get command help + get_help_output = _run_cmd(f""" +trcli labels get --help + """) + _assert_contains( + get_help_output, + [ + "Get a specific label by ID", + "--id", + "ID of the label to retrieve" + ] + ) + + def test_labels_pagination(self): + """Test labels pagination functionality""" + + # Test basic list command + list_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels list + """) + _assert_contains( + list_output, + [ + "Retrieving labels...", + "Found" + ] + ) + + # Test pagination with limit + paginated_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels list \\ + --limit 5 + """) + _assert_contains( + paginated_output, + [ + "Retrieving labels...", + "Found" + ] + ) + + # Test pagination with offset and limit + offset_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels list \\ + --offset 0 \\ + --limit 10 + """) + _assert_contains( + offset_output, + [ + "Retrieving labels...", + "Found" + ] + ) + + def test_labels_validation_errors(self): + """Test labels validation and error handling""" + + # Test title too long (more than 20 characters) + long_title_output, returncode = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "ThisTitleIsWayTooLongForTheValidationLimit" + """) + # Should fail with validation error + assert returncode != 0, f"Expected validation error but command succeeded: {long_title_output}" + assert "Error: Label title must be 20 characters or less." 
in long_title_output + + # Test invalid label ID for get + invalid_get_output, returncode = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels get \\ + --id 999999 + """) + # Should fail with API error + assert returncode != 0, f"Expected API error but command succeeded: {invalid_get_output}" + assert "Failed to retrieve label:" in invalid_get_output + + # Test invalid label ID format for delete + invalid_delete_output, returncode = _run_cmd_allow_failure(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids "abc,def" + """) + # Should fail with format validation error + assert returncode != 0, f"Expected validation error but command succeeded: {invalid_delete_output}" + assert "Error: Invalid label IDs format" in invalid_delete_output + + def test_labels_edge_cases(self): + """Test labels edge cases and boundary conditions""" + + # Test with exactly 20 character title (boundary condition) + twenty_char_title = "ExactlyTwentyCharss!" # Exactly 20 characters + assert len(twenty_char_title) == 20, "Test title should be exactly 20 characters" + + add_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "{twenty_char_title}" + """) + _assert_contains( + add_output, + [ + f"Adding label '{twenty_char_title}'...", + "Successfully added label:" + ] + ) + + # Extract label ID for cleanup + import re + label_id_match = re.search(r"ID=(\d+)", add_output) + if label_id_match: + label_id = label_id_match.group(1) + + # Cleanup - delete the test label + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) + _assert_contains( + delete_output, + [ + f"Deleting labels with IDs: {label_id}...", + "Successfully deleted 1 label(s)" + ] + ) \ No newline at end of file From 0384af05ef535d168b33fe941666f52c9cfe155c Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 7 Aug 2025 16:31:35 +0800 Subject: [PATCH 04/36] TRCLI-78 Added helper comments for functional tests --- tests_e2e/pytest.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests_e2e/pytest.ini b/tests_e2e/pytest.ini index 039d0a2..e913c23 100644 --- a/tests_e2e/pytest.ini +++ b/tests_e2e/pytest.ini @@ -1,2 +1,4 @@ [pytest] -log_cli = true \ No newline at end of file +log_cli = true +#when running functional tests, uncomment the following line to selectively run test based on function prefix +#python_functions = test_labels* \ No newline at end of file From a2a08195cfc3eac2e041996738d56771174d0e3d Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 7 Aug 2025 16:32:57 +0800 Subject: [PATCH 05/36] TRCLI-78 Added document and guide changes for Project level labels management --- README.md | 266 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 263 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 4ce9032..eae4c37 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,8 @@ The TestRail CLI currently supports: - **Uploading automated test results from JUnit reports** - **Uploading automated test results from Robot Framework reports** - **Auto-generating test cases from OpenAPI specifications** -- **Creating new test runs for results to be uploaded to.** +- **Creating new test runs for results to be uploaded 
to** +- **Managing project labels for better organization and categorization** To see further documentation about the TestRail CLI, please refer to the [TestRail CLI documentation pages](https://support.gurock.com/hc/en-us/articles/7146548750868-TestRail-CLI) @@ -39,6 +40,7 @@ Supported and loaded modules: - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new empty test run + - labels: Manage labels (add, update, delete, list) ``` CLI general reference @@ -81,7 +83,7 @@ Options: Commands: add_run Add a new test run in TestRail - labels Label management facility for Projects, Test Run and Test cases + labels Manage labels in TestRail parse_junit Parse JUnit report and upload results to TestRail parse_openapi Parse OpenAPI spec and create cases in TestRail parse_robot Parse Robot Framework report and upload results to TestRail @@ -287,7 +289,265 @@ will be used to upload all results into the same test run. #### Labels Management -Manage labels for **Projects**, **Test Cases**, and **Test Runs** using the `labels` command. Labels help categorize and organize your test management assets efficiently. +The TestRail CLI provides comprehensive label management capabilities for **Projects** using the `labels` command. Labels help categorize and organize your test management assets efficiently, making it easier to filter and manage test cases, runs, and projects. + +The `labels` command supports full CRUD (Create, Read, Update, Delete) operations: +- **Add** new labels to projects +- **List** existing labels with pagination support +- **Get** detailed information about specific labels +- **Update** existing label titles +- **Delete** single or multiple labels in batch + +##### Reference +```shell +$ trcli labels --help +Usage: trcli labels [OPTIONS] COMMAND [ARGS]... + + Manage labels in TestRail + +Options: + --help Show this message and exit. + +Commands: + add Add a new label in TestRail + delete Delete labels from TestRail + get Get a specific label by ID + list List all labels in the project + update Update an existing label in TestRail +``` + +##### Adding Labels +Create new labels for your project with a descriptive title (maximum 20 characters). + +```shell +# Add a single label +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels add --title "Critical" + +# Add a label for release management +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels add --title "Release-2.0" + +# Add a label for test categorization +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels add --title "Regression" +``` + +##### Listing Labels +View all labels in your project with optional pagination support. + +```shell +# List all labels (default: up to 250 labels) +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels list + +# List labels with pagination +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels list --limit 10 --offset 0 + +# List next page of labels +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels list --limit 10 --offset 10 +``` + +**Output example:** +``` +Retrieving labels... 
+Found 5 labels: + ID: 123, Title: 'Critical' + ID: 124, Title: 'Release-2.0' + ID: 125, Title: 'Regression' + ID: 126, Title: 'Bug-Fix' + ID: 127, Title: 'Performance' +``` + +##### Getting Label Details +Retrieve detailed information about a specific label by its ID. + +```shell +# Get details for a specific label +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels get --id 123 +``` + +**Output example:** +``` +Retrieving label with ID 123... +Label details: + ID: 123 + Title: 'Critical' + Created by: 2 + Created on: 1234567890 +``` + +##### Updating Labels +Modify the title of existing labels (maximum 20 characters). + +```shell +# Update a label's title +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels update --id 123 --title "High-Priority" +``` + +**Output example:** +``` +Updating label with ID 123... +Successfully updated label: ID=123, Title='High-Priority' +``` + +##### Deleting Labels +Remove single or multiple labels from your project. + +```shell +# Delete a single label +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels delete --ids 123 + +# Delete multiple labels (batch operation) +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels delete --ids "123,124,125" + +``` + +**Output example:** +``` +Are you sure you want to delete these labels? [y/N]: y +Deleting labels with IDs: 123,124... +Successfully deleted 2 label(s) +``` + +##### Common Use Cases + +**1. Release Management** +```shell +# Create release-specific labels +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Mobile App" \ + labels add --title "Sprint-42" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Mobile App" \ + labels add --title "Hotfix-2.1.3" +``` + +**2. Test Categorization** +```shell +# Create test type labels +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels add --title "Smoke" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels add --title "Integration" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels add --title "E2E" +``` + +**3. Priority and Severity** +```shell +# Create priority labels +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Bug Tracking" \ + labels add --title "P0-Critical" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Bug Tracking" \ + labels add --title "P1-High" +``` + +**4. Cleanup Operations** +```shell +# List all labels to identify unused ones +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Old Project" \ + labels list + +# Bulk delete obsolete labels +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Old Project" \ + labels delete --ids "100,101,102,103,104" +``` + +##### Command Options Reference + +**Add Command:** +```shell +$ trcli labels add --help +Options: + --title Title of the label to add (max 20 characters) [required] + --help Show this message and exit. +``` + +**List Command:** +```shell +$ trcli labels list --help +Options: + --offset Offset for pagination (default: 0) + --limit Limit for pagination (default: 250, max: 250) + --help Show this message and exit. 
+``` + +**Get Command:** +```shell +$ trcli labels get --help +Options: + --id ID of the label to retrieve [required] + --help Show this message and exit. +``` + +**Update Command:** +```shell +$ trcli labels update --help +Options: + --id ID of the label to update [required] + --title New title for the label (max 20 characters) [required] + --help Show this message and exit. +``` + +**Delete Command:** +```shell +$ trcli labels delete --help +Options: + --ids Comma-separated list of label IDs to delete [required] + --help Show this message and exit. +``` + +##### Error Handling and Validation + +The labels command includes comprehensive validation: + +- **Title Length**: Label titles are limited to 20 characters maximum +- **ID Validation**: Label IDs must be valid integers +- **Batch Operations**: Multiple label IDs must be comma-separated +- **Confirmation Prompts**: Delete operations require user confirmation (can be bypassed with `-y`) + +**Example error scenarios:** +```shell +# Title too long (>20 characters) +$ trcli labels add --title "This title is way too long for validation" +Error: Label title must be 20 characters or less. + +# Invalid label ID +$ trcli labels get --id 999999 +Failed to retrieve label: Label not found + +# Invalid ID format in batch delete +$ trcli labels delete --ids "abc,def" +Error: Invalid label IDs format +``` ### Reference ```shell From 938ada17cc29d14efab6e4a47c18ae25af34c63c Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 7 Aug 2025 20:40:16 +0800 Subject: [PATCH 06/36] TRCLI-78 Corrected parameter name for batch delete labels from label_id to label_ids --- tests/test_api_request_handler_labels.py | 4 ++-- trcli/api/api_request_handler.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index 98e20ae..ba76dc3 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -273,7 +273,7 @@ def test_delete_labels_success(self, labels_handler): labels_handler.client.send_post.assert_called_once_with( "delete_labels", payload=None, - files={"label_id": (None, "1,2,3")} + files={"label_ids": (None, "1,2,3")} ) def test_delete_label_single_id(self, labels_handler): @@ -312,7 +312,7 @@ def test_delete_labels_batch(self, labels_handler): labels_handler.client.send_post.assert_called_once_with( "delete_labels", payload=None, - files={"label_id": (None, "1,2,3")} + files={"label_ids": (None, "1,2,3")} ) def test_delete_labels_api_error(self, labels_handler): diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 60a8907..ea11a13 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -811,7 +811,7 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: """ # Send as form data with correct parameter name label_ids_str = ",".join(map(str, label_ids)) - files = {"label_id": (None, label_ids_str)} # Note: parameter is 'label_id' not 'label_ids' + files = {"label_ids": (None, label_ids_str)} response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message From bbfe4a2af25177886c01c91bd52f59f1398174df Mon Sep 17 00:00:00 2001 From: Christopher Geisel Date: Fri, 8 Aug 2025 15:11:30 -0700 Subject: [PATCH 07/36] update check_suite_id to handle paginated results --- tests/test_api_request_handler.py | 1 + trcli/api/api_request_handler.py | 37 
++++++++++++++++++++++++-------------
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py
index c02fd23..d4a83c1 100644
--- a/tests/test_api_request_handler.py
+++ b/tests/test_api_request_handler.py
@@ -163,6 +163,7 @@ def test_check_suite_exists_with_pagination(self, api_request_handler: ApiReques
             "offset": 0,
             "limit": 250,
             "size": 2,
+            "_links": {"next": None, "prev": None},
             "suites": [
                 {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3},
                 {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3},
diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py
index b5623df..81e4f06 100644
--- a/trcli/api/api_request_handler.py
+++ b/trcli/api/api_request_handler.py
@@ -122,28 +122,27 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project
             error_message=error,
         )
 
-    def check_suite_id(self, project_id: int) -> (bool, str):
+    def check_suite_id(self, project_id: int) -> Tuple[bool, str]:
         """
         Check if suite from DataProvider exist using get_suites endpoint
         :project_id: project id
         :returns: True if exists in suites. False if not.
         """
         suite_id = self.suites_data_from_provider.suite_id
-        response = self.client.send_get(f"get_suites/{project_id}")
-        if not response.error_message:
-            try:
-                parsed = json.loads(response.response_text) if isinstance(response.response_text, str) else response.response_text
-                suite_list = parsed.get("suites") if isinstance(parsed, dict) else parsed
-                available_suites = [suite["id"] for suite in suite_list]
-                return (
-                    (True, "")
-                    if suite_id in available_suites
-                    else (False, FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))
-                )
-            except Exception as e:
-                return None, f"Error parsing suites response: {e}"
+        suites_data, error = self.__get_all_suites(project_id)
+        if not error:
+            available_suites = [
+                suite
+                for suite in suites_data
+                if suite["id"] == suite_id
+            ]
+            return (
+                (True, "")
+                if len(available_suites) > 0
+                else (False, FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))
+            )
         else:
-            return None, response.error_message
+            return None, error
 
     def resolve_suite_id_using_name(self, project_id: int) -> Tuple[int, str]:
         """Get suite ID matching suite name on data provider or returns -1 if unable to match any suite.
@@ -707,10 +706,16 @@ def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]:
 
     def __get_all_projects(self) -> Tuple[List[dict], str]:
         """
-        Get all cases from all pages
+        Get all projects from all pages
        """
         return self.__get_all_entities('projects', f"get_projects")
 
+    def __get_all_suites(self, project_id) -> Tuple[List[dict], str]:
+        """
+        Get all suites from all pages
+        """
+        return self.__get_all_entities('suites', f"get_suites/{project_id}")
+
     def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]:
         """
         Get all entities from all pages if number of entities is too big to return in single response.
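The change above swaps the single `get_suites` call for the paginated `__get_all_suites`/`__get_all_entities` helpers, so projects with more than one page of suites (250 per page) are no longer truncated to the first response. As a rough, standalone sketch of the pattern those helpers rely on — accumulate each page's entity list and keep requesting the `_links.next` fragment until it comes back as `None` — the snippet below illustrates the loop. The `fetch_all_entities` name, the `send_get` callable, the `/api/v2/` prefix handling, and the exact response envelope (`offset`/`limit`/`size`/`_links`) are assumptions inferred from the diff, not trcli's actual implementation.

```python
from typing import Callable, Dict, List, Tuple

API_PREFIX = "/api/v2/"  # assumed prefix that TestRail puts in "_links.next" values


def fetch_all_entities(
    send_get: Callable[[str], Dict],  # stand-in for an API client's GET-and-parse-JSON call
    entity: str,                      # e.g. "suites", "cases", "projects"
    link: str,                        # e.g. "get_suites/1"
) -> Tuple[List[Dict], str]:
    """Collect every page of a paginated TestRail-style response for `entity`."""
    entities: List[Dict] = []
    while link:
        page = send_get(link)
        if not isinstance(page, dict) or entity not in page:
            return entities, f"Unexpected response while fetching '{entity}' from '{link}'"
        entities.extend(page[entity])
        # The last page reports "_links": {"next": None, ...}; a missing "_links"
        # key (older single-page endpoints) also ends the loop.
        next_link = (page.get("_links") or {}).get("next")
        if next_link and next_link.startswith(API_PREFIX):
            next_link = next_link[len(API_PREFIX):]
        link = next_link
    return entities, ""


if __name__ == "__main__":
    # Two fake pages standing in for get_suites/1 responses (hypothetical data).
    pages = {
        "get_suites/1": {
            "offset": 0, "limit": 250, "size": 2,
            "_links": {"next": "/api/v2/get_suites/1&offset=250", "prev": None},
            "suites": [{"id": 4, "name": "Suite1"}, {"id": 5, "name": "Suite2"}],
        },
        "get_suites/1&offset=250": {
            "offset": 250, "limit": 250, "size": 1,
            "_links": {"next": None, "prev": "/api/v2/get_suites/1"},
            "suites": [{"id": 6, "name": "Suite3"}],
        },
    }
    suites, error = fetch_all_entities(pages.__getitem__, "suites", "get_suites/1")
    assert not error and [s["id"] for s in suites] == [4, 5, 6]
    print(f"collected {len(suites)} suites across pages")
```

Feeding the fake two-page payload through the loop is just a quick way to check that collection spans pages and stops once `next` is `None`, which is the behavior the updated `check_suite_id` test asserts via the added `"_links"` field.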
From 1d27e5a831d3fd4d4823a97daa96ea007571754b Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 14 Aug 2025 15:53:25 +0800 Subject: [PATCH 08/36] TRCLI-113 added implementation for labels management on test cases --- trcli/api/api_request_handler.py | 232 ++++++++++++++++++++++++++++++- trcli/commands/cmd_labels.py | 149 +++++++++++++++++++- 2 files changed, 379 insertions(+), 2 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index ea11a13..af633b9 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -691,7 +691,10 @@ def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], s """ Get all cases from all pages """ - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + if suite_id is None: + return self.__get_all_entities('cases', f"get_cases/{project_id}") + else: + return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ @@ -815,3 +818,230 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message + + def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: + """ + Add a label to multiple test cases + + :param case_ids: List of test case IDs + :param title: Label title (max 20 characters) + :param project_id: Project ID for validation + :param suite_id: Suite ID (optional) + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = { + 'successful_cases': [], + 'failed_cases': [], + 'max_labels_reached': [], + 'case_not_found': [] + } + + # Check if project is multi-suite by getting all cases without suite_id + all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) + if error_message: + return results, error_message + + # Check if project has multiple suites + suite_ids = set() + for case in all_cases_no_suite: + if 'suite_id' in case and case['suite_id']: + suite_ids.add(case['suite_id']) + + # If project has multiple suites and no suite_id provided, require it + if len(suite_ids) > 1 and suite_id is None: + return results, "This project is multisuite, suite id is required" + + # Get all cases to validate that the provided case IDs exist + all_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return results, error_message + + # Create a set of existing case IDs for quick lookup + existing_case_ids = {case['id'] for case in all_cases} + + # Validate case IDs and separate valid from invalid ones + invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] + valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] + + # Record invalid case IDs + for case_id in invalid_case_ids: + results['case_not_found'].append(case_id) + + # If no valid case IDs, return early + if not valid_case_ids: + return results, "" + + # Check if label exists or create it + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Find existing label with the same title + label_id = None + for label in existing_labels.get('labels', []): + if label.get('title') == title: + label_id = label.get('id') + break + + 
# Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get('label', label_data) + label_id = label_info.get('id') + + # Collect case data and validate constraints + cases_to_update = [] + for case_id in valid_case_ids: + # Get current case to check existing labels + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + results['failed_cases'].append({ + 'case_id': case_id, + 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" + }) + continue + + case_data = case_response.response_text + current_labels = case_data.get('labels', []) + + # Check if label already exists on this case + if any(label.get('id') == label_id for label in current_labels): + results['successful_cases'].append({ + 'case_id': case_id, + 'message': f"Label '{title}' already exists on case {case_id}" + }) + continue + + # Check maximum labels limit (10) + if len(current_labels) >= 10: + results['max_labels_reached'].append(case_id) + continue + + # Prepare case for update + existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + updated_label_ids = existing_label_ids + [label_id] + cases_to_update.append({ + 'case_id': case_id, + 'labels': updated_label_ids + }) + + # Update cases using appropriate endpoint + if len(cases_to_update) == 1: + # Single case: use update_case/{case_id} + case_info = cases_to_update[0] + case_update_data = {'labels': case_info['labels']} + + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + else: + results['failed_cases'].append({ + 'case_id': case_info['case_id'], + 'error': update_response.error_message + }) + elif len(cases_to_update) > 1: + # Multiple cases: use update_cases/{suite_id} + # Need to determine suite_id from the cases + case_suite_id = suite_id + if not case_suite_id: + # Get suite_id from the first case if not provided + first_case = all_cases[0] if all_cases else None + case_suite_id = first_case.get('suite_id') if first_case else None + + if not case_suite_id: + # Fall back to individual updates if no suite_id available + for case_info in cases_to_update: + case_update_data = {'labels': case_info['labels']} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + else: + results['failed_cases'].append({ + 'case_id': case_info['case_id'], + 'error': update_response.error_message + }) + else: + # Batch update using update_cases/{suite_id} + batch_update_data = { + 'case_ids': [case_info['case_id'] for case_info in cases_to_update], + 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases + } + + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) + + if batch_response.status_code == 200: + for case_info in cases_to_update: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case 
{case_info['case_id']}" + }) + else: + # If batch update fails, fall back to individual updates + for case_info in cases_to_update: + case_update_data = {'labels': case_info['labels']} + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results['successful_cases'].append({ + 'case_id': case_info['case_id'], + 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" + }) + else: + results['failed_cases'].append({ + 'case_id': case_info['case_id'], + 'error': update_response.error_message + }) + + return results, "" + + def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + """ + Get test cases filtered by label ID or title + + :param project_id: Project ID + :param suite_id: Suite ID (optional) + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :returns: Tuple with list of matching cases and error string + """ + # Get all cases first + all_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return [], error_message + + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get('labels', []): + if label.get('title') == label_title: + target_label_ids.append(label.get('id')) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Filter cases that have any of the target labels + matching_cases = [] + for case in all_cases: + case_labels = case.get('labels', []) + case_label_ids = [label.get('id') for label in case_labels] + + # Check if any of the target label IDs are present in this case + if any(label_id in case_label_ids for label_id in target_label_ids): + matching_cases.append(case) + + return matching_cases, "" diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index 81a1a9c..e6ce726 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -222,4 +222,151 @@ def get(environment: Environment, context: click.Context, label_id: int, *args, environment.log(f" Created on: {label_info.get('created_on', 'N/A')}") else: environment.elog(f"Unexpected response format: {label_data}") - exit(1) \ No newline at end of file + exit(1) + + +@cli.group() +@click.pass_context +@pass_environment +def cases(environment: Environment, context: click.Context, *args, **kwargs): + """Manage labels for test cases""" + pass + + +@cases.command(name='add') +@click.option("--case-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--title", required=True, metavar="", help="Title of the label to add (max 20 characters).") +@click.pass_context +@pass_environment +def add_to_cases(environment: Environment, context: click.Context, case_ids: str, title: str, *args, **kwargs): + """Add a label to test cases""" + environment.check_for_required_parameters() + print_config(environment, "Add Cases") + + if len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + try: + case_id_list = [int(id.strip()) for id in case_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Adding label '{title}' to {len(case_id_list)} test case(s)...") + + results, error_message = project_client.api_request_handler.add_labels_to_cases( + case_ids=case_id_list, + title=title, + project_id=project_client.project.project_id, + suite_id=environment.suite_id + ) + + if error_message: + environment.elog(f"Failed to add labels to cases: {error_message}") + exit(1) + else: + # Report results + successful_cases = results.get('successful_cases', []) + failed_cases = results.get('failed_cases', []) + max_labels_reached = results.get('max_labels_reached', []) + case_not_found = results.get('case_not_found', []) + + if case_not_found: + environment.elog(f"Error: {len(case_not_found)} test case(s) not found:") + for case_id in case_not_found: + environment.elog(f" Case ID {case_id} does not exist in the project") + + if successful_cases: + environment.log(f"Successfully processed {len(successful_cases)} case(s):") + for case_result in successful_cases: + environment.log(f" Case {case_result['case_id']}: {case_result['message']}") + + if max_labels_reached: + environment.log(f"Warning: {len(max_labels_reached)} case(s) already have maximum labels (10):") + for case_id in max_labels_reached: + environment.log(f" Case {case_id}: Maximum labels reached") + + if failed_cases: + environment.log(f"Failed to process {len(failed_cases)} case(s):") + for case_result in failed_cases: + environment.log(f" Case {case_result['case_id']}: {case_result['error']}") + + # Exit with error if there were invalid case IDs + if case_not_found: + exit(1) + + +@cases.command(name='list') +@click.option("--ids", metavar="", help="Comma-separated list of label IDs to filter by (e.g., 1,2,3).") +@click.option("--title", metavar="", help="Label title to filter by (max 20 characters).") +@click.pass_context +@pass_environment +def list_cases(environment: Environment, context: click.Context, ids: str, title: str, *args, **kwargs): + """List test cases filtered by label ID or title""" + environment.check_for_required_parameters() + + # Validate that either ids or title is provided + if not ids and not title: + environment.elog("Error: Either --ids or --title must be provided.") + exit(1) + + if title and len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + print_config(environment, "List Cases by Label") + + label_ids = None + if ids: + try: + label_ids = [int(id.strip()) for id in ids.split(",")] + except ValueError: + environment.elog("Error: Invalid label IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + if title: + environment.log(f"Retrieving test cases with label title '{title}'...") + else: + environment.log(f"Retrieving test cases with label IDs: {', '.join(map(str, label_ids))}...") + + matching_cases, error_message = project_client.api_request_handler.get_cases_by_label( + project_id=project_client.project.project_id, + suite_id=environment.suite_id, + label_ids=label_ids, + label_title=title + ) + + if error_message: + environment.elog(f"Failed to retrieve cases: {error_message}") + exit(1) + else: + environment.log(f"Found {len(matching_cases)} matching test case(s):") + environment.log("") + + if matching_cases: + for case in matching_cases: + case_labels = case.get('labels', []) + label_info = [] + for label in case_labels: + label_info.append(f"ID:{label.get('id')},Title:'{label.get('title')}'") + + labels_str = f" [Labels: {'; '.join(label_info)}]" if label_info else " [No labels]" + environment.log(f" Case ID: {case['id']}, Title: '{case['title']}'{labels_str}") + else: + if title: + environment.log(f" No test cases found with label title '{title}'.") + else: + environment.log(f" No test cases found with the specified label IDs.") \ No newline at end of file From cad3db57cad50374c5e815acbb36405773c9fabb Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 14 Aug 2025 15:53:57 +0800 Subject: [PATCH 09/36] TRCLI-113 added functional tests for labels management for test cases --- tests_e2e/test_end2end.py | 311 +++++++++++++++++++++++++++++++++++++- 1 file changed, 309 insertions(+), 2 deletions(-) diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index ebf3311..0d785dd 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -4,6 +4,11 @@ import pytest +def _has_testrail_credentials(): + """Check if TestRail credentials are available in environment variables""" + return bool(os.environ.get("TR_CLI_USERNAME") and os.environ.get("TR_CLI_PASSWORD")) + + def _run_cmd(multiline_cmd: str): lines_list = [] for line in multiline_cmd.splitlines(): @@ -64,8 +69,8 @@ class TestsEndToEnd: # TestRail 101 instance has the required configuration for this test run TR_INSTANCE = "https://testrail101.testrail.io/" # Uncomment and enter your credentials below in order to execute the tests locally - # os.environ.setdefault("TR_CLI_USERNAME", "") - # os.environ.setdefault("TR_CLI_PASSWORD", "") + #os.environ.setdefault("TR_CLI_USERNAME", "") + #os.environ.setdefault("TR_CLI_PASSWORD", "") @pytest.fixture(autouse=True, scope="module") def install_trcli(self): @@ -930,4 +935,306 @@ def test_labels_edge_cases(self): "Successfully deleted 1 label(s)" ] ) + + + def test_labels_cases_full_workflow(self): + """Test complete workflow of test case label operations""" + # First, create a test label + add_label_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "e2e-case-label" + """) + _assert_contains( + add_label_output, + [ + "Adding label 'e2e-case-label'...", + "Successfully added label:" + ] + ) + + # Extract label ID for later use + import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) + assert label_id_match, "Could not extract label ID from output" + label_id = label_id_match.group(1) + + try: + # Use known 
test case IDs that should exist in the test project + # These are typical case IDs that exist in the TestRail test environment + test_case_ids = ["24964", "24965"] # Multiple test cases for batch testing + + # Add labels to test cases (using single-suite project for batch testing) + add_cases_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "{','.join(test_case_ids)}" \\ + --title "e2e-case-label" + """) + _assert_contains( + add_cases_output, + [ + f"Adding label 'e2e-case-label' to {len(test_case_ids)} test case(s)...", + "Successfully processed" + ] + ) + + # List test cases by label title (using single-suite project) + list_by_title_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "e2e-case-label" + """) + _assert_contains( + list_by_title_output, + [ + "Retrieving test cases with label title 'e2e-case-label'...", + "matching test case(s):" + ] + ) + + # List test cases by label ID (using single-suite project) + list_by_id_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --ids "{label_id}" + """) + _assert_contains( + list_by_id_output, + [ + f"Retrieving test cases with label IDs: {label_id}...", + "matching test case(s):" + ] + ) + + finally: + # Cleanup - delete the test label + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) + _assert_contains( + delete_output, + [ + f"Deleting labels with IDs: {label_id}...", + "Successfully deleted 1 label(s)" + ] + ) + + def test_labels_cases_validation_errors(self): + """Test validation errors for test case label commands""" + # Test title too long for add cases + long_title_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "1" \\ + --title "this-title-is-way-too-long-for-testrail" + """) + assert return_code != 0 + _assert_contains( + long_title_output, + ["Error: Label title must be 20 characters or less."] + ) + + # Test invalid case IDs format + invalid_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "invalid,ids" \\ + --title "test" + """) + assert return_code != 0 + _assert_contains( + invalid_ids_output, + ["Error: Invalid case IDs format. 
Use comma-separated integers (e.g., 1,2,3)."] + ) + + # Test missing filter for list cases + no_filter_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list + """) + assert return_code != 0 + _assert_contains( + no_filter_output, + ["Error: Either --ids or --title must be provided."] + ) + + # Test title too long for list cases + long_title_list_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "this-title-is-way-too-long-for-testrail" + """) + assert return_code != 0 + _assert_contains( + long_title_list_output, + ["Error: Label title must be 20 characters or less."] + ) + + def test_labels_cases_help_commands(self): + """Test help output for test case label commands""" + # Test main cases help + cases_help_output = _run_cmd("trcli labels cases --help") + _assert_contains( + cases_help_output, + [ + "Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]...", + "Manage labels for test cases", + "add Add a label to test cases", + "list List test cases filtered by label ID or title" + ] + ) + + # Test cases add help + cases_add_help_output = _run_cmd("trcli labels cases add --help") + _assert_contains( + cases_add_help_output, + [ + "Usage: trcli labels cases add [OPTIONS]", + "Add a label to test cases", + "--case-ids", + "--title" + ] + ) + + # Test cases list help + cases_list_help_output = _run_cmd("trcli labels cases list --help") + _assert_contains( + cases_list_help_output, + [ + "Usage: trcli labels cases list [OPTIONS]", + "List test cases filtered by label ID or title", + "--ids", + "--title" + ] + ) + + def test_labels_cases_no_matching_cases(self): + """Test behavior when no test cases match the specified label""" + # Test with non-existent label title + no_match_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "non-existent-label" + """) + _assert_contains( + no_match_output, + [ + "Retrieving test cases with label title 'non-existent-label'...", + "Found 0 matching test case(s):", + "No test cases found with label title 'non-existent-label'." + ] + ) + + # Test with non-existent label ID + no_match_id_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --ids "99999" + """) + _assert_contains( + no_match_id_output, + [ + "Retrieving test cases with label IDs: 99999...", + "Found 0 matching test case(s):", + "No test cases found with the specified label IDs." 
+ ] + ) + + def test_labels_cases_single_case_workflow(self): + """Test single case label operations using update_case endpoint""" + # First, create a test label + add_label_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "e2e-single-case" + """) + _assert_contains( + add_label_output, + [ + "Adding label 'e2e-single-case'...", + "Successfully added label:" + ] + ) + + # Extract label ID for later use + import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) + assert label_id_match, "Could not extract label ID from output" + label_id = label_id_match.group(1) + + try: + # Use single test case ID for testing update_case endpoint + single_case_id = "24964" + + # Add label to single test case + add_single_case_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases add \\ + --case-ids "{single_case_id}" \\ + --title "e2e-single-case" + """) + _assert_contains( + add_single_case_output, + [ + f"Adding label 'e2e-single-case' to 1 test case(s)...", + "Successfully processed 1 case(s):", + f"Successfully added label 'e2e-single-case' to case {single_case_id}" + ] + ) + + # Verify the label was added by listing cases with this label + list_cases_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels cases list \\ + --title "e2e-single-case" + """) + _assert_contains( + list_cases_output, + [ + "Retrieving test cases with label title 'e2e-single-case'...", + "Found 1 matching test case(s):", + f"Case ID: {single_case_id}" + ] + ) + + finally: + # Clean up: delete the test label + _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) \ No newline at end of file From cfe197315f29686ebf1c693d4dba8e8cdeabe2e6 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 14 Aug 2025 15:55:45 +0800 Subject: [PATCH 10/36] TRCLI-113 updated tests report file for property and name to reflect actual case IDs in test instance --- tests_e2e/reports_junit/generic_ids_name.xml | 6 +++--- tests_e2e/reports_junit/generic_ids_property.xml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests_e2e/reports_junit/generic_ids_name.xml b/tests_e2e/reports_junit/generic_ids_name.xml index 0e9f57d..610de13 100644 --- a/tests_e2e/reports_junit/generic_ids_name.xml +++ b/tests_e2e/reports_junit/generic_ids_name.xml @@ -4,8 +4,8 @@ - - + + failed due to... @@ -14,7 +14,7 @@ - + diff --git a/tests_e2e/reports_junit/generic_ids_property.xml b/tests_e2e/reports_junit/generic_ids_property.xml index 979c959..a0a947a 100644 --- a/tests_e2e/reports_junit/generic_ids_property.xml +++ b/tests_e2e/reports_junit/generic_ids_property.xml @@ -6,13 +6,13 @@ - + failed due to... 
- + @@ -21,7 +21,7 @@ - + From 9bbedbd56c2ff4af425e584088ba53d596a9235d Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 14 Aug 2025 15:56:42 +0800 Subject: [PATCH 11/36] TRCLI-113 updated unit tests for label management for test cases --- tests/test_api_request_handler_labels.py | 368 ++++++++++++++++++++++- tests/test_cmd_labels.py | 283 ++++++++++++++++- 2 files changed, 648 insertions(+), 3 deletions(-) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index ba76dc3..ca67438 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -1,5 +1,5 @@ import pytest -from unittest.mock import Mock, patch, MagicMock +from unittest.mock import Mock, patch, MagicMock, call from pathlib import Path import json from serde.json import from_json @@ -341,4 +341,368 @@ def test_delete_labels_forbidden(self, labels_handler): success, error = labels_handler.delete_labels(label_ids=[1]) assert success is False - assert error == "No access to the project" \ No newline at end of file + assert error == "No access to the project" + + +class TestApiRequestHandlerLabelsCases: + """Test cases for test case label operations""" + + def setup_method(self): + """Set up test fixtures""" + # Create proper objects like the existing fixture + api_client = APIClient(host_name="http://test.com") + environment = Environment() + environment.project = "Test Project" + environment.batch_size = 10 + + # Create a minimal TestRailSuite for testing + from trcli.data_classes.dataclass_testrail import TestRailSuite + test_suite = TestRailSuite(name="Test Suite") + + self.labels_handler = ApiRequestHandler(environment, api_client, test_suite, verify=False) + + def test_add_labels_to_cases_success(self): + """Test successful addition of labels to test cases""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler, 'add_label') as mock_add_label, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ + patch.object(self.labels_handler.client, 'send_post') as mock_send_post: + + # Mock __get_all_cases response (cases exist) + mock_get_cases.return_value = ([ + {"id": 1, "title": "Case 1", "suite_id": 1}, + {"id": 2, "title": "Case 2", "suite_id": 1} + ], "") + + # Mock get_labels response (label doesn't exist) + mock_get_labels.return_value = ({"labels": []}, "") + + # Mock add_label response (create new label) + mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") + + # Mock get_case responses + mock_send_get.side_effect = [ + MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}), # Case 1 + MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}) # Case 2 + ] + + # Mock update_cases batch response (for multiple cases) + mock_send_post.return_value = MagicMock(status_code=200) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1, 2], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 2 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 0 + assert len(results['case_not_found']) == 0 + + # Verify API calls - should be called twice: once for multi-suite detection, once for 
case validation + assert mock_get_cases.call_count == 2 + mock_get_cases.assert_has_calls([ + call(1, None), # Multi-suite detection + call(1, None) # Case validation + ]) + mock_get_labels.assert_called_once_with(1) + mock_add_label.assert_called_once_with(1, "test-label") + assert mock_send_get.call_count == 2 + # Should call update_cases/{suite_id} once for batch update + mock_send_post.assert_called_once_with("update_cases/1", payload={ + 'case_ids': [1, 2], + 'labels': [5] + }) + + def test_add_labels_to_cases_single_case(self): + """Test adding labels to a single test case using update_case endpoint""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler, 'add_label') as mock_add_label, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ + patch.object(self.labels_handler.client, 'send_post') as mock_send_post: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([ + {"id": 1, "title": "Case 1"} + ], "") + + # Mock get_labels response (label doesn't exist) + mock_get_labels.return_value = ({"labels": []}, "") + + # Mock add_label response (create new label) + mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") + + # Mock get_case response + mock_send_get.return_value = MagicMock( + status_code=200, + response_text={"labels": [], "suite_id": 1, "title": "Case 1"} + ) + + # Mock update_case response (for single case) + mock_send_post.return_value = MagicMock(status_code=200) + + # Test the method with single case + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 1 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 0 + assert len(results['case_not_found']) == 0 + + # Verify API calls + assert mock_get_cases.call_count == 2 + mock_get_labels.assert_called_once_with(1) + mock_add_label.assert_called_once_with(1, "test-label") + assert mock_send_get.call_count == 1 + # Should call update_case/{case_id} once for single case + mock_send_post.assert_called_once_with("update_case/1", payload={'labels': [5]}) + + def test_add_labels_to_cases_existing_label(self): + """Test adding labels when label already exists""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler, 'add_label') as mock_add_label, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ + patch.object(self.labels_handler.client, 'send_post') as mock_send_post: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + + # Mock get_labels response (label exists) + mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") + + # Mock get_case response + mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}) + + # Mock add_label_to_case response + mock_send_post.return_value = MagicMock(status_code=200) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + 
+ # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 1 + assert len(results['case_not_found']) == 0 + + # Verify add_label was not called (label already exists) + mock_add_label.assert_not_called() + + def test_add_labels_to_cases_max_labels_reached(self): + """Test handling of maximum labels limit (10)""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + + # Mock get_labels response + mock_get_labels.return_value = ({"labels": [{"id": 15, "title": "test-label"}]}, "") + + # Mock get_case response with 10 existing labels (different from test-label) + existing_labels = [{"id": i, "title": f"label-{i}"} for i in range(1, 11)] + mock_send_get.return_value = MagicMock( + status_code=200, + response_text={"labels": existing_labels} + ) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 0 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 1 + assert len(results['case_not_found']) == 0 + assert results['max_labels_reached'][0] == 1 + + def test_add_labels_to_cases_label_already_on_case(self): + """Test handling when label already exists on case""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ + patch.object(self.labels_handler.client, 'send_get') as mock_send_get: + + # Mock __get_all_cases response (case exists) + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + + # Mock get_labels response + mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") + + # Mock get_case response with the label already present + mock_send_get.return_value = MagicMock( + status_code=200, + response_text={"labels": [{"id": 5, "title": "test-label"}]} + ) + + # Test the method + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[1], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results + assert len(results['successful_cases']) == 1 + assert len(results['case_not_found']) == 0 + assert "already exists" in results['successful_cases'][0]['message'] + + def test_add_labels_to_cases_case_not_found(self): + """Test handling when case IDs don't exist""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: + + # Mock __get_all_cases response (no cases exist) + mock_get_cases.return_value = ([], "") + + # Test the method with case IDs that don't exist + results, error_message = self.labels_handler.add_labels_to_cases( + case_ids=[999, 1000, 1001], + title="test-label", + project_id=1 + ) + + # Verify no error + assert error_message == "" + + # Verify results - all cases should be in case_not_found + assert len(results['case_not_found']) == 3 + assert 999 in results['case_not_found'] + assert 1000 in results['case_not_found'] + assert 1001 in results['case_not_found'] + + # Verify that no other processing happened 
since no valid cases + assert len(results['successful_cases']) == 0 + assert len(results['failed_cases']) == 0 + assert len(results['max_labels_reached']) == 0 + + def test_get_cases_by_label_with_label_ids(self): + """Test getting cases by label IDs""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: + + # Mock cases response + mock_cases = [ + {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "label1"}]}, + {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "label2"}]}, + {"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]} + ] + mock_get_cases.return_value = (mock_cases, "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_ids=[5] + ) + + # Verify no error + assert error_message == "" + + # Verify results (should return cases 1 and 3) + assert len(matching_cases) == 2 + assert matching_cases[0]['id'] == 1 + assert matching_cases[1]['id'] == 3 + + def test_get_cases_by_label_with_title(self): + """Test getting cases by label title""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels: + + # Mock labels response + mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") + + # Mock cases response + mock_cases = [ + {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "test-label"}]}, + {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]} + ] + mock_get_cases.return_value = (mock_cases, "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_title="test-label" + ) + + # Verify no error + assert error_message == "" + + # Verify results (should return case 1) + assert len(matching_cases) == 1 + assert matching_cases[0]['id'] == 1 + + def test_get_cases_by_label_title_not_found(self): + """Test getting cases by non-existent label title""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ + patch.object(self.labels_handler, 'get_labels') as mock_get_labels: + + # Mock labels response (no matching label) + mock_get_labels.return_value = ({"labels": []}, "") + + # Mock get_all_cases to return empty (not called due to early return) + mock_get_cases.return_value = ([], "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_title="non-existent-label" + ) + + # Verify error + assert error_message == "" + assert matching_cases == [] + + def test_get_cases_by_label_no_matching_cases(self): + """Test getting cases when no cases have the specified label""" + with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: + + # Mock cases response (no cases with target label) + mock_cases = [ + {"id": 1, "title": "Test Case 1", "labels": [{"id": 6, "title": "other-label"}]}, + {"id": 2, "title": "Test Case 2", "labels": []} + ] + mock_get_cases.return_value = (mock_cases, "") + + # Test the method + matching_cases, error_message = self.labels_handler.get_cases_by_label( + project_id=1, + suite_id=None, + label_ids=[5] + ) + + # Verify no error but no results + assert error_message == "" + assert len(matching_cases) == 0 \ No newline at end of file diff --git a/tests/test_cmd_labels.py 
b/tests/test_cmd_labels.py index 18a1a0e..ef8fdea 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -345,4 +345,285 @@ def test_print_config(self): "\n> TestRail instance: https://test.testrail.com (user: test@example.com)" "\n> Project: Test Project" ) - mock_log.assert_called_once_with(expected_message) \ No newline at end of file + mock_log.assert_called_once_with(expected_message) + + +class TestLabelsCasesCommands: + """Test cases for test case label CLI commands""" + + def setup_method(self): + """Set up test fixtures""" + self.runner = CliRunner() + self.environment = Environment() + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_success(self, mock_project_client): + """Test successful addition of labels to test cases""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite.suite_id = None + mock_client_instance.api_request_handler.add_labels_to_cases.return_value = ( + { + 'successful_cases': [ + {'case_id': 1, 'message': "Successfully added label 'test-label' to case 1"}, + {'case_id': 2, 'message': "Successfully added label 'test-label' to case 2"} + ], + 'failed_cases': [], + 'max_labels_reached': [], + 'case_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1,2', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_cases.assert_called_once_with( + case_ids=[1, 2], + title='test-label', + project_id=1, + suite_id=None + ) + + # Verify success messages were logged + mock_log.assert_any_call("Successfully processed 2 case(s):") + mock_log.assert_any_call(" Case 1: Successfully added label 'test-label' to case 1") + mock_log.assert_any_call(" Case 2: Successfully added label 'test-label' to case 2") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_with_max_labels_reached(self, mock_project_client): + """Test addition of labels with some cases reaching maximum labels""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_labels_to_cases.return_value = ( + { + 'successful_cases': [ + {'case_id': 1, 'message': "Successfully added label 'test-label' to case 1"} + ], + 'failed_cases': [], + 'max_labels_reached': [2, 3], + 'case_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1,2,3', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 0 + + # Verify warning messages were logged + mock_log.assert_any_call("Warning: 2 case(s) already have maximum labels (10):") + mock_log.assert_any_call(" Case 2: Maximum labels reached") + mock_log.assert_any_call(" Case 3: Maximum labels reached") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_title_too_long(self, mock_project_client): + """Test title 
length validation""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1', '--title', 'this-title-is-way-too-long-for-testrail'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Label title must be 20 characters or less.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_invalid_case_ids(self, mock_project_client): + """Test invalid case IDs format""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', 'invalid,ids', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Invalid case IDs format. Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_cases_case_not_found(self, mock_project_client): + """Test handling of non-existent case IDs""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_labels_to_cases.return_value = ( + { + 'successful_cases': [ + {'case_id': 1, 'message': "Successfully added label 'test-label' to case 1"} + ], + 'failed_cases': [], + 'max_labels_reached': [], + 'case_not_found': [999, 1000] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['add', '--case-ids', '1,999,1000', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 1 + + # Verify error messages were logged + mock_elog.assert_any_call("Error: 2 test case(s) not found:") + mock_elog.assert_any_call(" Case ID 999 does not exist in the project") + mock_elog.assert_any_call(" Case ID 1000 does not exist in the project") + + # Verify success messages were still logged + mock_log.assert_any_call("Successfully processed 1 case(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_by_label_ids_success(self, mock_project_client): + """Test successful listing of cases by label IDs""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite = None + mock_client_instance.api_request_handler.get_cases_by_label.return_value = ( + [ + { + 'id': 1, + 'title': 'Test Case 1', + 'labels': [{'id': 5, 'title': 'test-label'}] + }, + { + 'id': 2, + 'title': 'Test Case 2', + 'labels': [{'id': 5, 'title': 'test-label'}, {'id': 6, 'title': 'other-label'}] + } + ], + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list', '--ids', '5'], + obj=self.environment + ) + + assert result.exit_code == 0 + 
mock_client_instance.api_request_handler.get_cases_by_label.assert_called_once_with( + project_id=1, + suite_id=None, + label_ids=[5], + label_title=None + ) + + # Verify cases were logged + mock_log.assert_any_call("Found 2 matching test case(s):") + mock_log.assert_any_call(" Case ID: 1, Title: 'Test Case 1' [Labels: ID:5,Title:'test-label']") + mock_log.assert_any_call(" Case ID: 2, Title: 'Test Case 2' [Labels: ID:5,Title:'test-label'; ID:6,Title:'other-label']") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_by_label_title_success(self, mock_project_client): + """Test successful listing of cases by label title""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite = None + mock_client_instance.api_request_handler.get_cases_by_label.return_value = ( + [ + { + 'id': 1, + 'title': 'Test Case 1', + 'labels': [{'id': 5, 'title': 'test-label'}] + } + ], + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list', '--title', 'test-label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_cases_by_label.assert_called_once_with( + project_id=1, + suite_id=None, + label_ids=None, + label_title='test-label' + ) + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_no_filter_provided(self, mock_project_client): + """Test error when neither ids nor title is provided""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Either --ids or --title must be provided.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_cases_no_matching_cases(self, mock_project_client): + """Test listing when no cases match the label""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.suite = None + mock_client_instance.api_request_handler.get_cases_by_label.return_value = ([], "") + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.cases, + ['list', '--title', 'non-existent'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_log.assert_any_call("Found 0 matching test case(s):") + mock_log.assert_any_call(" No test cases found with label title 'non-existent'.") + + \ No newline at end of file From dd675b805e097307d5566edaa87d6cab28a52c87 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 14 Aug 2025 15:57:11 +0800 Subject: [PATCH 12/36] TRCLI-113 Updated readme guide to include labels management for test cases --- README.md | 155 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 140 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index eae4c37..89be056 100644 --- a/README.md +++ b/README.md @@ -289,14 +289,13 @@ will 
be used to upload all results into the same test run. #### Labels Management -The TestRail CLI provides comprehensive label management capabilities for **Projects** using the `labels` command. Labels help categorize and organize your test management assets efficiently, making it easier to filter and manage test cases, runs, and projects. +The TestRail CLI provides comprehensive label management capabilities using the `labels` command. Labels help categorize and organize your test management assets efficiently, making it easier to filter and manage test cases, runs, and projects. -The `labels` command supports full CRUD (Create, Read, Update, Delete) operations: -- **Add** new labels to projects -- **List** existing labels with pagination support -- **Get** detailed information about specific labels -- **Update** existing label titles -- **Delete** single or multiple labels in batch +The TestRail CLI supports two types of label management: +- **Project Labels**: Manage labels at the project level +- **Test Case Labels**: Apply labels to specific test cases for better organization and filtering + +Both types of labels support full CRUD (Create, Read, Update, Delete) operations with comprehensive validation and error handling. ##### Reference ```shell @@ -310,13 +309,25 @@ Options: Commands: add Add a new label in TestRail + cases Manage labels for test cases delete Delete labels from TestRail get Get a specific label by ID list List all labels in the project update Update an existing label in TestRail ``` -##### Adding Labels +#### Project Labels + +Project labels are managed using the main `labels` command and provide project-wide label management capabilities. These labels can be created, updated, deleted, and listed at the project level. + +**Project Labels Support:** +- **Add** new labels to projects +- **List** existing labels with pagination support +- **Get** detailed information about specific labels +- **Update** existing label titles +- **Delete** single or multiple labels in batch + +###### Adding Labels Create new labels for your project with a descriptive title (maximum 20 characters). ```shell @@ -336,7 +347,7 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor labels add --title "Regression" ``` -##### Listing Labels +###### Listing Labels View all labels in your project with optional pagination support. ```shell @@ -367,7 +378,7 @@ Found 5 labels: ID: 127, Title: 'Performance' ``` -##### Getting Label Details +###### Getting Label Details Retrieve detailed information about a specific label by its ID. ```shell @@ -387,7 +398,7 @@ Label details: Created on: 1234567890 ``` -##### Updating Labels +###### Updating Labels Modify the title of existing labels (maximum 20 characters). ```shell @@ -403,7 +414,7 @@ Updating label with ID 123... Successfully updated label: ID=123, Title='High-Priority' ``` -##### Deleting Labels +###### Deleting Labels Remove single or multiple labels from your project. ```shell @@ -426,7 +437,7 @@ Deleting labels with IDs: 123,124... Successfully deleted 2 label(s) ``` -##### Common Use Cases +###### Common Use Cases **1. Release Management** ```shell @@ -481,7 +492,7 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor labels delete --ids "100,101,102,103,104" ``` -##### Command Options Reference +###### Command Options Reference **Add Command:** ```shell @@ -525,7 +536,7 @@ Options: --help Show this message and exit. 
``` -##### Error Handling and Validation +###### Error Handling and Validation The labels command includes comprehensive validation: @@ -549,6 +560,120 @@ $ trcli labels delete --ids "abc,def" Error: Invalid label IDs format ``` +#### Test Case Labels + +In addition to project-level labels, the TestRail CLI also supports **test case label management** through the `labels cases` command. This functionality allows you to assign labels to specific test cases and filter test cases by their labels, providing powerful organization and filtering capabilities for your test suite. + +###### Test Case Label Features +- **Add labels to test cases**: Apply existing or new labels to one or multiple test cases +- **List test cases by labels**: Find test cases that have specific labels applied +- **Automatic label creation**: Labels are created automatically if they don't exist when adding to cases +- **Maximum label validation**: Enforces TestRail's limit of 10 labels per test case +- **Flexible filtering**: Search by label ID or title + +###### Reference +```shell +$ trcli labels cases --help +Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]... + + Manage labels for test cases + +Options: + --help Show this message and exit. + +Commands: + add Add a label to test cases + list List test cases filtered by label ID or title +``` + +###### Adding Labels to Test Cases +Apply labels to one or multiple test cases. If the label doesn't exist, it will be created automatically. + +```shell +# Add a label to a single test case +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases add --case-ids 123 --title "Regression" + +# Add a label to multiple test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases add --case-ids "123,124,125" --title "Critical" + +# Add a release label to test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases add --case-ids "100,101,102" --title "Sprint-42" +``` + +###### Listing Test Cases by Labels +Find test cases that have specific labels applied, either by label ID or title. + +```shell +# List test cases by label title +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases list --title "Regression" + +# List test cases by label ID +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases list --ids 123 + +# List test cases by multiple label IDs +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels cases list --ids "123,124,125" +``` + +**Output example:** +``` +Retrieving test cases with label title 'Regression'... +Found 3 matching test case(s): + + Case ID: 123, Title: 'Login functionality test' [Labels: ID:5,Title:'Regression'; ID:7,Title:'Critical'] + Case ID: 124, Title: 'Password validation test' [Labels: ID:5,Title:'Regression'] + Case ID: 125, Title: 'User registration test' [Labels: ID:5,Title:'Regression'; ID:8,Title:'UI'] +``` + +**No matches example:** +``` +Retrieving test cases with label title 'Non-Existent'... +Found 0 matching test case(s): + No test cases found with label title 'Non-Existent'. 
+``` + +###### Command Options Reference + +**Add Cases Command:** +```shell +$ trcli labels cases add --help +Options: + --case-ids Comma-separated list of test case IDs [required] + --title Title of the label to add (max 20 characters) [required] + --help Show this message and exit. +``` + +**List Cases Command:** +```shell +$ trcli labels cases list --help +Options: + --ids Comma-separated list of label IDs to filter by + --title Label title to filter by (max 20 characters) + --help Show this message and exit. +``` + +###### Validation Rules + +**Test Case Label Management includes these validations:** + +- **Label Title**: Maximum 20 characters (same as project labels) +- **Case IDs**: Must be valid integers in comma-separated format +- **Maximum Labels**: Each test case can have maximum 10 labels +- **Filter Requirements**: Either `--ids` or `--title` must be provided for list command +- **Label Creation**: Labels are automatically created if they don't exist when adding to cases +- **Duplicate Prevention**: Adding an existing label to a case is handled gracefully + ### Reference ```shell $ trcli add_run --help From e74e63f68e6618ee50625b6ec23adb7583319cc4 Mon Sep 17 00:00:00 2001 From: cgeisel Date: Tue, 12 Aug 2025 11:48:51 -0700 Subject: [PATCH 13/36] update resolve_suite_id_using_name, get_suite_ids --- tests/test_api_request_handler.py | 70 +++++++++++++++++++++++++++++-- trcli/api/api_request_handler.py | 66 +++++++++++++---------------- trcli/constants.py | 3 +- 3 files changed, 99 insertions(+), 40 deletions(-) diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py index d4a83c1..4f17c37 100644 --- a/tests/test_api_request_handler.py +++ b/tests/test_api_request_handler.py @@ -550,18 +550,82 @@ def test_check_missing_test_cases_ids_false( assert error == "", "No error should have occurred" @pytest.mark.api_handler - def test_get_suites_id(self, api_request_handler: ApiRequestHandler, requests_mock): + def test_get_suite_ids(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = [ {"id": 100, "name": "Master"}, + {"id": 101, "name": "Smoke"}, ] requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.get_suite_ids(project_id) assert ( - resources_added[0] == mocked_response[0]["id"] + resources_added[0] == mocked_response[0]["id"] and + resources_added[1] == mocked_response[1]["id"] ), "ID in response doesn't match mocked response" - assert error == "", "Error occurred in get_suite_ids" + + @pytest.mark.api_handler + def test_get_suite_ids_error( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + project_id = 3 + + requests_mock.get( + create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout + ) + + suite_ids, error = api_request_handler.get_suite_ids(project_id) + + assert suite_ids == [], "Should return empty list on API error" + assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." 
\ + " Please check your settings and try again.", "Should return connection error message" + + @pytest.mark.api_handler + def test_resolve_suite_id_using_name( + self, api_request_handler: ApiRequestHandler, requests_mock, mocker + ): + project_id = 3 + suite_name = "Suite2" + api_request_handler.suites_data_from_provider.name = suite_name + + update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + + mocked_response = { + "offset": 0, + "limit": 250, + "size": 2, + "_links": {"next": None, "prev": None}, + "suites": [ + {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, + {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, + ] + } + + requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) + + suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) + + assert suite_id == 5, "Should return the correct suite ID for matching name with pagination" + assert error == "", "Should have no error message" + + update_data_mock.assert_called_once_with([{"suite_id": 5, "name": "Suite2"}]) + + @pytest.mark.api_handler + def test_resolve_suite_id_using_name_error( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + project_id = 3 + + requests_mock.get( + create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout + ) + + suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) + + assert suite_id == -1, "Should return -1 on API error" + assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ + " Please check your settings and try again.", "Should return connection error message" + @pytest.mark.api_handler def test_return_project_error( diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 81e4f06..f662f35 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -149,25 +149,21 @@ def resolve_suite_id_using_name(self, project_id: int) -> Tuple[int, str]: :arg project_id: project id :returns: tuple with id of the suite and error message""" suite_id = -1 - error_message = "" - response = self.client.send_get(f"get_suites/{project_id}") - if not response.error_message: - try: - parsed = json.loads(response.response_text) if isinstance(response.response_text, str) else response.response_text - suite_list = parsed.get("suites") if isinstance(parsed, dict) else parsed - suite = next( - filter(lambda x: x["name"] == self.suites_data_from_provider.name, suite_list), - None - ) - if suite: + suite_name = self.suites_data_from_provider.name + suites_data, error = self.__get_all_suites(project_id) + if not error: + for suite in suites_data: + if suite["name"] == suite_name: suite_id = suite["id"] self.data_provider.update_data([{"suite_id": suite["id"], "name": suite["name"]}]) - except Exception as e: - error_message = f"Error parsing suites response: {e}" + break + return ( + (suite_id, "") + if suite_id != -1 + else (-1, FAULT_MAPPING["missing_suite"].format(suite_name=suite_name)) + ) else: - error_message = response.error_message - - return suite_id, error_message + return -1, error def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: """Get suite IDs for requested project_id. 
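The refactored `resolve_suite_id_using_name` and `get_suite_ids` both delegate retrieval to the private `__get_all_suites` helper, whose body is outside this hunk. A minimal sketch of the pagination loop it is assumed to perform — collect every page of `get_suites/{project_id}` until `_links.next` is empty, then hand back a flat list plus an error string — is shown below. The offset/limit query parameters and the result shape (`response_text`, `error_message`) are taken from the mocked responses in the tests above, so treat this as an illustration rather than the exact implementation:

```python
from typing import List, Tuple


def get_all_suites(client, project_id: int) -> Tuple[List[dict], str]:
    """Sketch of the assumed __get_all_suites behaviour: flatten every page into one list."""
    suites: List[dict] = []
    offset, limit = 0, 250
    while True:
        response = client.send_get(f"get_suites/{project_id}&offset={offset}&limit={limit}")
        if response.error_message:
            return [], response.error_message  # propagate transport errors to the caller
        payload = response.response_text
        if isinstance(payload, dict):  # paginated shape: {"offset": .., "_links": .., "suites": [..]}
            suites.extend(payload.get("suites", []))
            if not payload.get("_links", {}).get("next"):
                return suites, ""  # last page reached
            offset += limit
        else:  # older TestRail responses return a bare list of suites
            return suites + list(payload), ""
```

With the pages flattened this way, the name match in `resolve_suite_id_using_name` reduces to a single linear scan, which is what the paginated mock in `test_resolve_suite_id_using_name` exercises.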
@@ -175,29 +171,27 @@ def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: : returns: tuple with list of suite ids and error string""" available_suites = [] returned_resources = [] - error_message = "" - response = self.client.send_get(f"get_suites/{project_id}") - if not response.error_message: - try: - parsed = json.loads(response.response_text) if isinstance(response.response_text, str) else response.response_text - suite_list = parsed.get("suites") if isinstance(parsed, dict) else parsed - for suite in suite_list: - available_suites.append(int(suite["id"])) - returned_resources.append({ + suites_data, error = self.__get_all_suites(project_id) + if not error: + for suite in suites_data: + available_suites.append(suite["id"]) + returned_resources.append( + { "suite_id": suite["id"], "name": suite["name"], - }) - except Exception as e: - error_message = f"Error parsing suites response: {e}" - else: - error_message = response.error_message - - if returned_resources: - self.data_provider.update_data(suite_data=returned_resources) + } + ) + if returned_resources: + self.data_provider.update_data(suite_data=returned_resources) + else: + print("Update skipped") + return ( + (available_suites, "") + if len(available_suites) > 0 + else ([], FAULT_MAPPING["no_suites_found"].format(project_id=project_id)) + ) else: - print("Update skipped") - - return available_suites, error_message + return [], error def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: """ diff --git a/trcli/constants.py b/trcli/constants.py index 70b9604..35da6bd 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -63,7 +63,8 @@ proxy_bypass_error= "Failed to bypass the proxy for host. Please check the settings.", proxy_invalid_configuration= "The provided proxy configuration is invalid. Please check the proxy URL and format.", ssl_error_on_proxy= "SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.", - no_proxy_match_error= "The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy." + no_proxy_match_error= "The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.", + no_suites_found= "The project {project_id} does not have any suites." 
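The new `no_suites_found` entry pairs with the `([], error)` return path of the reworked `get_suite_ids`. A short, illustrative consumer (the `report_suites` name and parameters are not from the patch) would branch on that tuple like this:

```python
def report_suites(handler, environment, project_id: int) -> None:
    """Illustrative caller of get_suite_ids: ([], message) signals a project with no suites."""
    suite_ids, error = handler.get_suite_ids(project_id)
    if error:
        # `error` arrives already formatted, e.g.
        # FAULT_MAPPING["no_suites_found"].format(project_id=project_id)
        environment.elog(error)
        return
    environment.log(f"Project {project_id} has {len(suite_ids)} suite(s): {suite_ids}")
```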
) COMMAND_FAULT_MAPPING = dict( From fe756a6cde095f7cba8fe444e521c3b11ba93a98 Mon Sep 17 00:00:00 2001 From: Serhiy Date: Sun, 17 Aug 2025 23:10:52 +0300 Subject: [PATCH 14/36] Fix milestone_id handling in API data provider; fixes #340 --- trcli/data_providers/api_data_provider.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/trcli/data_providers/api_data_provider.py b/trcli/data_providers/api_data_provider.py index 3b98c14..21a962b 100644 --- a/trcli/data_providers/api_data_provider.py +++ b/trcli/data_providers/api_data_provider.py @@ -93,7 +93,6 @@ def add_run( body = { "suite_id": self.suites_input.suite_id, "description": "\n".join(properties), - "milestone_id": milestone_id, "case_ids": case_ids } if isinstance(start_date, list) and start_date is not None: @@ -116,6 +115,8 @@ def add_run( body["refs"] = refs if run_name is not None: body["name"] = run_name + if milestone_id is not None: + body["milestone_id"] = milestone_id return body def add_results_for_cases(self, bulk_size): From 3510b75f5c97f0f33c07e70d86d2324857b5ce85 Mon Sep 17 00:00:00 2001 From: Serhiy Date: Sun, 17 Aug 2025 23:18:16 +0300 Subject: [PATCH 15/36] Remove milestone_id from test run data providers --- tests/test_data/api_data_provider_test_data.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/test_data/api_data_provider_test_data.py b/tests/test_data/api_data_provider_test_data.py index b6cf010..a51f7cc 100644 --- a/tests/test_data/api_data_provider_test_data.py +++ b/tests/test_data/api_data_provider_test_data.py @@ -39,8 +39,7 @@ "description": "logging: True\ndebug: False", "name": "test run", "suite_id": 123, - "case_ids": [60, 4], - "milestone_id": None + "case_ids": [60, 4] } post_run_full_body = { @@ -48,7 +47,6 @@ "name": "test run", "suite_id": 123, "case_ids": [60, 4], - "milestone_id": None, "assignedto_id": 1, "include_all": True, "refs": "SAN-1, SAN-2" From 35dde02ea96d7f8bdcfcdf297fe761be42c33309 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 19 Aug 2025 21:53:17 +0800 Subject: [PATCH 16/36] TRCLI-114 added changes for labels for tests --- trcli/api/api_request_handler.py | 245 ++++++++++++++++++++++++++++ trcli/commands/cmd_labels.py | 272 ++++++++++++++++++++++++++++++- trcli/constants.py | 2 +- 3 files changed, 517 insertions(+), 2 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index af633b9..a7d382c 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1045,3 +1045,248 @@ def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: L matching_cases.append(case) return matching_cases, "" + + def add_labels_to_tests(self, test_ids: List[int], title: str, project_id: int) -> Tuple[dict, str]: + """ + Add a label to multiple tests + + :param test_ids: List of test IDs + :param title: Label title (max 20 characters) + :param project_id: Project ID for validation + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = { + 'successful_tests': [], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + } + + # Validate test IDs by getting run information for each test + valid_test_ids = [] + for test_id in test_ids: + # Get test information to validate it exists + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results['test_not_found'].append(test_id) + continue + + test_data = test_response.response_text + # Validate that 
the test belongs to the correct project + run_id = test_data.get('run_id') + if run_id: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + run_data = run_response.response_text + if run_data.get('project_id') == project_id: + valid_test_ids.append(test_id) + else: + results['test_not_found'].append(test_id) + else: + results['test_not_found'].append(test_id) + else: + results['test_not_found'].append(test_id) + + # If no valid test IDs, return early + if not valid_test_ids: + return results, "" + + # Check if label exists or create it + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Find existing label with the same title + label_id = None + for label in existing_labels.get('labels', []): + if label.get('title') == title: + label_id = label.get('id') + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get('label', label_data) + label_id = label_info.get('id') + + # Collect test data and validate constraints + tests_to_update = [] + for test_id in valid_test_ids: + # Get current test to check existing labels + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results['failed_tests'].append({ + 'test_id': test_id, + 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" + }) + continue + + test_data = test_response.response_text + current_labels = test_data.get('labels', []) + + # Check if label already exists on this test + if any(label.get('id') == label_id for label in current_labels): + results['successful_tests'].append({ + 'test_id': test_id, + 'message': f"Label '{title}' already exists on test {test_id}" + }) + continue + + # Check maximum labels limit (10) + if len(current_labels) >= 10: + results['max_labels_reached'].append(test_id) + continue + + # Prepare test for update + existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] + updated_label_ids = existing_label_ids + [label_id] + tests_to_update.append({ + 'test_id': test_id, + 'labels': updated_label_ids + }) + + # Update tests using appropriate endpoint + if len(tests_to_update) == 1: + # Single test: use update_test/{test_id} + test_info = tests_to_update[0] + test_update_data = {'labels': test_info['labels']} + + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + results['successful_tests'].append({ + 'test_id': test_info['test_id'], + 'message': f"Successfully added label '{title}' to test {test_info['test_id']}" + }) + else: + results['failed_tests'].append({ + 'test_id': test_info['test_id'], + 'error': update_response.error_message + }) + elif len(tests_to_update) > 1: + # Multiple tests: use update_tests endpoint with comma-separated test IDs + test_ids_str = ",".join(str(test_info['test_id']) for test_info in tests_to_update) + batch_update_data = { + 'test_ids': [test_info['test_id'] for test_info in tests_to_update], + 'labels': tests_to_update[0]['labels'] # Assuming same labels for all tests + } + + batch_response = self.client.send_post(f"update_tests/{test_ids_str}", payload=batch_update_data) + + if batch_response.status_code == 200: + for test_info in tests_to_update: + results['successful_tests'].append({ + 'test_id': 
test_info['test_id'], + 'message': f"Successfully added label '{title}' to test {test_info['test_id']}" + }) + else: + # If batch update fails, fall back to individual updates + for test_info in tests_to_update: + test_update_data = {'labels': test_info['labels']} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + results['successful_tests'].append({ + 'test_id': test_info['test_id'], + 'message': f"Successfully added label '{title}' to test {test_info['test_id']}" + }) + else: + results['failed_tests'].append({ + 'test_id': test_info['test_id'], + 'error': update_response.error_message + }) + + return results, "" + + def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + """ + Get tests filtered by label ID or title + + :param project_id: Project ID + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :returns: Tuple with list of matching tests and error string + """ + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get('labels', []): + if label.get('title') == label_title: + target_label_ids.append(label.get('id')) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Get all runs for the project to find tests + runs_response = self.client.send_get(f"get_runs/{project_id}") + if runs_response.status_code != 200: + return [], runs_response.error_message + + runs_data = runs_response.response_text + runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data + + # Collect all tests from all runs + matching_tests = [] + for run in runs: + run_id = run.get('id') + if not run_id: + continue + + # Get tests for this run + tests_response = self.client.send_get(f"get_tests/{run_id}") + if tests_response.status_code != 200: + continue # Skip this run if we can't get tests + + tests_data = tests_response.response_text + tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data + + # Filter tests that have any of the target labels + for test in tests: + test_labels = test.get('labels', []) + test_label_ids = [label.get('id') for label in test_labels] + + # Check if any of the target label IDs are present in this test + if any(label_id in test_label_ids for label_id in target_label_ids): + matching_tests.append(test) + + return matching_tests, "" + + def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: + """ + Get labels for specific tests + + :param test_ids: List of test IDs to get labels for + :returns: Tuple with list of test label information and error string + """ + results = [] + + for test_id in test_ids: + # Get test information + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results.append({ + 'test_id': test_id, + 'error': f"Test {test_id} not found or inaccessible", + 'labels': [] + }) + continue + + test_data = test_response.response_text + test_labels = test_data.get('labels', []) + + results.append({ + 'test_id': test_id, + 'title': test_data.get('title', 'Unknown'), + 'status_id': test_data.get('status_id'), + 'labels': test_labels, + 'error': None + }) + + return results, "" diff --git 
a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index e6ce726..0095059 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -369,4 +369,274 @@ def list_cases(environment: Environment, context: click.Context, ids: str, title if title: environment.log(f" No test cases found with label title '{title}'.") else: - environment.log(f" No test cases found with the specified label IDs.") \ No newline at end of file + environment.log(f" No test cases found with the specified label IDs.") + + +@cli.group() +@click.pass_context +@pass_environment +def tests(environment: Environment, context: click.Context, *args, **kwargs): + """Manage labels for tests""" + pass + + +@tests.command(name='add') +@click.option("--test-ids", metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") +@click.option("--test-id-file", metavar="", help="CSV file containing test IDs.") +@click.option("--title", required=True, metavar="", help="Title of the label to add (max 20 characters).") +@click.pass_context +@pass_environment +def add_to_tests(environment: Environment, context: click.Context, test_ids: str, test_id_file: str, title: str, *args, **kwargs): + """Add a label to tests""" + environment.check_for_required_parameters() + print_config(environment, "Add Tests") + + if len(title) > 20: + environment.elog("Error: Label title must be 20 characters or less.") + exit(1) + + # Validate that either test_ids or test_id_file is provided + if not test_ids and not test_id_file: + environment.elog("Error: Either --test-ids or --test-id-file must be provided.") + exit(1) + + if test_ids and test_id_file: + environment.elog("Error: Cannot use both --test-ids and --test-id-file. Choose one.") + exit(1) + + test_id_list = [] + + # Parse test IDs from command line + if test_ids: + try: + test_id_list = [int(id.strip()) for id in test_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid test IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + # Parse test IDs from CSV file + if test_id_file: + import csv + import os + + if not os.path.exists(test_id_file): + environment.elog(f"Error: File '{test_id_file}' not found.") + exit(1) + + try: + with open(test_id_file, 'r', newline='', encoding='utf-8') as csvfile: + # Try to detect delimiter + sample = csvfile.read(1024) + csvfile.seek(0) + sniffer = csv.Sniffer() + + single_column_mode = False + try: + delimiter = sniffer.sniff(sample).delimiter + except csv.Error: + # If delimiter detection fails, check for common delimiters + if ',' in sample: + delimiter = ',' + elif ';' in sample: + delimiter = ';' + elif '\t' in sample: + delimiter = '\t' + else: + # Single column file - use line-by-line reading + single_column_mode = True + + if single_column_mode: + # Handle single column files (no delimiters) + csvfile.seek(0) + lines = csvfile.readlines() + for line in lines: + line = line.strip() + if line and not line.lower().startswith('test'): # Skip likely headers + try: + test_id_list.append(int(line)) + except ValueError: + environment.log(f"Warning: Ignoring invalid test ID '{line}' in file") + else: + # Handle CSV files with delimiters + reader = csv.reader(csvfile, delimiter=delimiter) + + # Skip header if it exists (check if first row contains non-numeric values) + first_row = next(reader, None) + if first_row: + # Check if first row looks like a header + try: + # If we can convert all values to int, it's likely data, not header + [int(val.strip()) for val in first_row if val.strip()] + # Reset to beginning and don't skip + csvfile.seek(0) + reader = csv.reader(csvfile, delimiter=delimiter) + except ValueError: + # First row contains non-numeric data, likely header, so we skip it + pass + + for row in reader: + for cell in row: + cell_value = cell.strip() + if cell_value: # Skip empty cells + try: + test_id_list.append(int(cell_value)) + except ValueError: + environment.log(f"Warning: Ignoring invalid test ID '{cell_value}' in file") + + except Exception as e: + environment.elog(f"Error reading CSV file: {e}") + exit(1) + + if not test_id_list: + environment.elog("Error: No valid test IDs found in the CSV file.") + exit(1) + + environment.log(f"Loaded {len(test_id_list)} test ID(s) from file '{test_id_file}'") + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Adding label '{title}' to {len(test_id_list)} test(s)...") + + results, error_message = project_client.api_request_handler.add_labels_to_tests( + test_ids=test_id_list, + title=title, + project_id=project_client.project.project_id + ) + + if error_message: + environment.elog(f"Failed to add labels to tests: {error_message}") + exit(1) + else: + # Report results + successful_tests = results.get('successful_tests', []) + failed_tests = results.get('failed_tests', []) + max_labels_reached = results.get('max_labels_reached', []) + test_not_found = results.get('test_not_found', []) + + if test_not_found: + environment.log(f"Warning: {len(test_not_found)} test(s) not found or not accessible:") + for test_id in test_not_found: + environment.log(f" Test ID {test_id} does not exist or is not accessible") + + if successful_tests: + environment.log(f"Successfully processed {len(successful_tests)} test(s):") + for test_result in successful_tests: + environment.log(f" Test {test_result['test_id']}: 
{test_result['message']}") + + if max_labels_reached: + environment.log(f"Warning: {len(max_labels_reached)} test(s) already have maximum labels (10):") + for test_id in max_labels_reached: + environment.log(f" Test {test_id}: Maximum labels reached") + + if failed_tests: + environment.log(f"Failed to process {len(failed_tests)} test(s):") + for test_result in failed_tests: + environment.log(f" Test {test_result['test_id']}: {test_result['error']}") + + +@tests.command(name='list') +@click.option("--ids", required=True, metavar="", help="Comma-separated list of label IDs to filter by (e.g., 1,2,3).") +@click.pass_context +@pass_environment +def list_tests(environment: Environment, context: click.Context, ids: str, *args, **kwargs): + """List tests filtered by label ID""" + environment.check_for_required_parameters() + print_config(environment, "List Tests by Label") + + try: + label_ids = [int(id.strip()) for id in ids.split(",")] + except ValueError: + environment.elog("Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Retrieving tests with label IDs: {', '.join(map(str, label_ids))}...") + + matching_tests, error_message = project_client.api_request_handler.get_tests_by_label( + project_id=project_client.project.project_id, + label_ids=label_ids + ) + + if error_message: + environment.elog(f"Failed to retrieve tests: {error_message}") + exit(1) + else: + environment.log(f"Found {len(matching_tests)} matching test(s):") + environment.log("") + + if matching_tests: + for test in matching_tests: + test_labels = test.get('labels', []) + label_info = [] + for label in test_labels: + label_info.append(f"ID:{label.get('id')},Title:'{label.get('title')}'") + + labels_str = f" [Labels: {'; '.join(label_info)}]" if label_info else " [No labels]" + status_name = test.get('status_id', 'Unknown') + environment.log(f" Test ID: {test['id']}, Title: '{test.get('title', 'Unknown')}', Status: {status_name}{labels_str}") + else: + environment.log(f" No tests found with the specified label IDs.") + + +@tests.command(name='get') +@click.option("--test-id", required=True, metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") +@click.pass_context +@pass_environment +def get_test_labels(environment: Environment, context: click.Context, test_id: str, *args, **kwargs): + """Get the labels of tests using test IDs""" + environment.check_for_required_parameters() + print_config(environment, "Get Test Labels") + + try: + test_id_list = [int(id.strip()) for id in test_id.split(",")] + except ValueError: + environment.elog("Error: Invalid test IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Retrieving labels for {len(test_id_list)} test(s)...") + + test_labels, error_message = project_client.api_request_handler.get_test_labels(test_id_list) + + if error_message: + environment.elog(f"Failed to retrieve test labels: {error_message}") + exit(1) + else: + environment.log(f"Test label information:") + environment.log("") + + for test_info in test_labels: + test_id = test_info['test_id'] + + if test_info.get('error'): + environment.log(f" Test ID: {test_id} - Error: {test_info['error']}") + else: + test_labels = test_info.get('labels', []) + title = test_info.get('title', 'Unknown') + status_id = test_info.get('status_id', 'Unknown') + + environment.log(f" Test ID: {test_id}") + environment.log(f" Title: '{title}'") + environment.log(f" Status: {status_id}") + + if test_labels: + environment.log(f" Labels ({len(test_labels)}):") + for label in test_labels: + environment.log(f" - ID: {label.get('id')}, Title: '{label.get('title')}'") + else: + environment.log(f" Labels: No labels assigned") + environment.log("") \ No newline at end of file diff --git a/trcli/constants.py b/trcli/constants.py index dbe346e..34cf1f7 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -93,7 +93,7 @@ - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run - - labels: Manage labels (add, update, delete, list)""" + - labels: Manage labels (projects, cases, and tests)""" MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. 
\nError: Missing command.""" From 7352b9ec4b4f22a4f165581122118c0de0776e41 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 19 Aug 2025 21:56:35 +0800 Subject: [PATCH 17/36] TRCLI-114 added functional tests and unit tests for labels management for tests, also added sample csv file for bulk testing labels assigning facility --- tests/test_api_request_handler_labels.py | 319 ++++++++++++++++++++++- tests/test_cmd_labels.py | 258 ++++++++++++++++++ tests/test_data/cli_test_data.py | 2 +- tests_e2e/sample_csv/test_ids.csv | 3 + tests_e2e/test_end2end.py | 267 +++++++++++++++++-- 5 files changed, 830 insertions(+), 19 deletions(-) create mode 100644 tests_e2e/sample_csv/test_ids.csv diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index ca67438..203f197 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -705,4 +705,321 @@ def test_get_cases_by_label_no_matching_cases(self): # Verify no error but no results assert error_message == "" - assert len(matching_cases) == 0 \ No newline at end of file + assert len(matching_cases) == 0 + + +class TestApiRequestHandlerTestLabels: + """Test class for test label management API methods""" + + def test_add_labels_to_tests_success_single(self, labels_handler): + """Test successful label addition to a single test""" + # Mock test validation + mock_test_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, + error_message=None + ) + + # Mock run validation + mock_run_response = APIClientResult( + status_code=200, + response_text={"id": 1, "project_id": 1}, + error_message=None + ) + + # Mock existing labels + mock_labels_response = APIClientResult( + status_code=200, + response_text={"labels": []}, + error_message=None + ) + + # Mock label creation + mock_add_label_response = APIClientResult( + status_code=200, + response_text={"id": 5, "title": "Test Label"}, + error_message=None + ) + + # Mock test update + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get, \ + patch.object(labels_handler.client, 'send_post') as mock_post: + + # Setup get responses for validation and label retrieval + mock_get.side_effect = [ + mock_test_response, # get_test/{test_id} + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels + mock_test_response, # get_test/{test_id} again for labels check + ] + + # Setup post responses for label creation and test update + mock_post.side_effect = [ + mock_add_label_response, # add_label + mock_update_response # update_test + ] + + result, error = labels_handler.add_labels_to_tests( + test_ids=[1], + title="Test Label", + project_id=1 + ) + + assert error == "" + assert len(result['successful_tests']) == 1 + assert len(result['failed_tests']) == 0 + assert len(result['test_not_found']) == 0 + assert len(result['max_labels_reached']) == 0 + + def test_add_labels_to_tests_test_not_found(self, labels_handler): + """Test handling of non-existent test IDs""" + # Mock test not found + mock_test_response = APIClientResult( + status_code=404, + response_text=None, + error_message="Test not found" + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): + result, error = labels_handler.add_labels_to_tests( + test_ids=[999], + title="Test Label", + 
project_id=1 + ) + + assert error == "" + assert len(result['test_not_found']) == 1 + assert 999 in result['test_not_found'] + + def test_add_labels_to_tests_max_labels_reached(self, labels_handler): + """Test handling of tests that already have maximum labels""" + # Create 10 existing labels + existing_labels = [{"id": i, "title": f"Label {i}"} for i in range(1, 11)] + + # Mock test with max labels + mock_test_response = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": existing_labels}, + error_message=None + ) + + # Mock run validation + mock_run_response = APIClientResult( + status_code=200, + response_text={"id": 1, "project_id": 1}, + error_message=None + ) + + # Mock existing labels + mock_labels_response = APIClientResult( + status_code=200, + response_text={"labels": []}, + error_message=None + ) + + # Mock label creation + mock_add_label_response = APIClientResult( + status_code=200, + response_text={"id": 11, "title": "New Label"}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get, \ + patch.object(labels_handler.client, 'send_post') as mock_post: + + mock_get.side_effect = [ + mock_test_response, # get_test/{test_id} + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels + mock_test_response, # get_test/{test_id} again for labels check + ] + + mock_post.return_value = mock_add_label_response + + result, error = labels_handler.add_labels_to_tests( + test_ids=[1], + title="New Label", + project_id=1 + ) + + assert error == "" + assert len(result['max_labels_reached']) == 1 + assert 1 in result['max_labels_reached'] + + def test_get_tests_by_label_success(self, labels_handler): + """Test successful retrieval of tests by label""" + # Mock runs response + mock_runs_response = APIClientResult( + status_code=200, + response_text={"runs": [{"id": 1}, {"id": 2}]}, + error_message=None + ) + + # Mock tests responses for each run + mock_tests_response_run1 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}, + {"id": 2, "title": "Test 2", "labels": []} + ]}, + error_message=None + ) + + mock_tests_response_run2 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]} + ]}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get: + mock_get.side_effect = [ + mock_runs_response, # get_runs/{project_id} + mock_tests_response_run1, # get_tests/{run_id} for run 1 + mock_tests_response_run2 # get_tests/{run_id} for run 2 + ] + + result, error = labels_handler.get_tests_by_label( + project_id=1, + label_ids=[5] + ) + + assert error == "" + assert len(result) == 2 + assert result[0]['id'] == 1 + assert result[1]['id'] == 3 + + def test_get_test_labels_success(self, labels_handler): + """Test successful retrieval of test labels""" + # Mock test responses + mock_test_response1 = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test 1", + "status_id": 1, + "labels": [{"id": 5, "title": "Test Label"}] + }, + error_message=None + ) + + mock_test_response2 = APIClientResult( + status_code=200, + response_text={ + "id": 2, + "title": "Test 2", + "status_id": 2, + "labels": [] + }, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get: + mock_get.side_effect = [mock_test_response1, mock_test_response2] + + 
result, error = labels_handler.get_test_labels([1, 2]) + + assert error == "" + assert len(result) == 2 + + # Check first test + assert result[0]['test_id'] == 1 + assert result[0]['title'] == "Test 1" + assert result[0]['status_id'] == 1 + assert len(result[0]['labels']) == 1 + assert result[0]['labels'][0]['title'] == "Test Label" + assert result[0]['error'] is None + + # Check second test + assert result[1]['test_id'] == 2 + assert result[1]['title'] == "Test 2" + assert result[1]['status_id'] == 2 + assert len(result[1]['labels']) == 0 + assert result[1]['error'] is None + + def test_get_test_labels_test_not_found(self, labels_handler): + """Test handling of non-existent test IDs in get_test_labels""" + # Mock test not found + mock_test_response = APIClientResult( + status_code=404, + response_text=None, + error_message="Test not found" + ) + + with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): + result, error = labels_handler.get_test_labels([999]) + + assert error == "" + assert len(result) == 1 + assert result[0]['test_id'] == 999 + assert result[0]['error'] == "Test 999 not found or inaccessible" + assert result[0]['labels'] == [] + + def test_add_labels_to_tests_batch_update(self, labels_handler): + """Test batch update of multiple tests""" + # Mock test validation for multiple tests + mock_test_response1 = APIClientResult( + status_code=200, + response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, + error_message=None + ) + + mock_test_response2 = APIClientResult( + status_code=200, + response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, + error_message=None + ) + + # Mock run validation + mock_run_response = APIClientResult( + status_code=200, + response_text={"id": 1, "project_id": 1}, + error_message=None + ) + + # Mock existing labels + mock_labels_response = APIClientResult( + status_code=200, + response_text={"labels": [{"id": 5, "title": "Test Label"}]}, + error_message=None + ) + + # Mock batch update + mock_batch_response = APIClientResult( + status_code=200, + response_text={"updated": 2}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get, \ + patch.object(labels_handler.client, 'send_post') as mock_post: + + # Setup get responses + mock_get.side_effect = [ + mock_test_response1, # get_test/1 + mock_run_response, # get_run/1 + mock_test_response2, # get_test/2 + mock_run_response, # get_run/1 + mock_labels_response, # get_labels + mock_test_response1, # get_test/1 for labels check + mock_test_response2, # get_test/2 for labels check + ] + + # Setup batch update response + mock_post.return_value = mock_batch_response + + result, error = labels_handler.add_labels_to_tests( + test_ids=[1, 2], + title="Test Label", + project_id=1 + ) + + assert error == "" + assert len(result['successful_tests']) == 2 \ No newline at end of file diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py index ef8fdea..108b1ba 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -625,5 +625,263 @@ def test_list_cases_no_matching_cases(self, mock_project_client): assert result.exit_code == 0 mock_log.assert_any_call("Found 0 matching test case(s):") mock_log.assert_any_call(" No test cases found with label title 'non-existent'.") +class TestCmdLabelsTests: + """Test class for test labels command functionality""" + + def setup_method(self): + """Set up test environment""" + self.runner = CliRunner() + self.environment = Environment(cmd="labels") + 
self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_success(self, mock_project_client): + """Test successful label addition to tests""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [{'test_id': 1, 'message': 'Success'}], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1], title='Test Label', project_id=1 + ) + mock_log.assert_any_call("Successfully processed 1 test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_with_csv_file(self, mock_project_client): + """Test label addition to tests using CSV file""" + # Mock the project client + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [{'test_id': 1, 'message': 'Success'}, {'test_id': 2, 'message': 'Success'}], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + # Create a temporary CSV file + with self.runner.isolated_filesystem(): + with open('test_ids.csv', 'w') as f: + f.write('test_id,description\n1,Test One\n2,Test Two\n') + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-id-file', 'test_ids.csv', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1, 2], title='Test Label', project_id=1 + ) + mock_log.assert_any_call("Loaded 2 test ID(s) from file 'test_ids.csv'") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_validation_error(self, mock_project_client): + """Test validation error when neither test-ids nor file provided""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Either --test-ids or --test-id-file must 
be provided.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_title_too_long(self, mock_project_client): + """Test validation error for title too long""" + long_title = "a" * 21 # 21 characters, exceeds limit + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', long_title], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Label title must be 20 characters or less.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_tests_by_label_success(self, mock_project_client): + """Test successful listing of tests by label""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.get_tests_by_label.return_value = ( + [ + {'id': 1, 'title': 'Test 1', 'status_id': 1, 'labels': [{'id': 5, 'title': 'Test Label'}]}, + {'id': 2, 'title': 'Test 2', 'status_id': 2, 'labels': [{'id': 5, 'title': 'Test Label'}]} + ], + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['list', '--ids', '5'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_tests_by_label.assert_called_once_with( + project_id=1, label_ids=[5] + ) + mock_log.assert_any_call("Found 2 matching test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_get_test_labels_success(self, mock_project_client): + """Test successful retrieval of test labels""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.get_test_labels.return_value = ( + [ + { + 'test_id': 1, + 'title': 'Test 1', + 'status_id': 1, + 'labels': [{'id': 5, 'title': 'Test Label'}], + 'error': None + } + ], + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['get', '--test-id', '1'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.get_test_labels.assert_called_once_with([1]) + mock_log.assert_any_call("Test label information:") + mock_log.assert_any_call(" Test ID: 1") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_tests_invalid_ids(self, mock_project_client): + """Test invalid label IDs format in list command""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['list', '--ids', 
'invalid,ids'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_csv_file_not_found(self, mock_project_client): + """Test error when CSV file is not found""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-id-file', 'nonexistent.csv', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: File 'nonexistent.csv' not found.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_with_warnings(self, mock_project_client): + """Test label addition with warnings for not found tests and max labels""" + # Mock the project client + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [{'test_id': 1, 'message': 'Success'}], + 'failed_tests': [], + 'max_labels_reached': [2], + 'test_not_found': [999] + }, + "" + ) + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1,2,999', '--title', 'Test Label'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_log.assert_any_call("Warning: 1 test(s) not found or not accessible:") + mock_log.assert_any_call(" Test ID 999 does not exist or is not accessible") + mock_log.assert_any_call("Warning: 1 test(s) already have maximum labels (10):") + mock_log.assert_any_call(" Test 2: Maximum labels reached") \ No newline at end of file diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index 9881239..756feae 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -67,6 +67,6 @@ ' - parse_robot: Robot Framework XML Files\n' ' - parse_openapi: OpenAPI YML Files\n' ' - add_run: Create a new test run\n' - ' - labels: Manage labels (add, update, delete, list)\n') + ' - labels: Manage labels (projects, cases, and tests)\n') trcli_help_description = "TestRail CLI" diff --git a/tests_e2e/sample_csv/test_ids.csv b/tests_e2e/sample_csv/test_ids.csv new file mode 100644 index 0000000..68c889e --- /dev/null +++ b/tests_e2e/sample_csv/test_ids.csv @@ -0,0 +1,3 @@ +test_id +266149 +266151 diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 0d785dd..89c0b88 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -939,18 +939,25 @@ def test_labels_edge_cases(self): def test_labels_cases_full_workflow(self): """Test complete workflow of test case label operations""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + case_label_title = f"e2e-case-{random_suffix}" + # First, create a test label add_label_output = _run_cmd(f""" trcli -y \\ -h 
{self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ - --title "e2e-case-label" + --title "{case_label_title}" """) _assert_contains( add_label_output, [ - "Adding label 'e2e-case-label'...", + f"Adding label '{case_label_title}'...", "Successfully added label:" ] ) @@ -963,43 +970,42 @@ def test_labels_cases_full_workflow(self): try: # Use known test case IDs that should exist in the test project - # These are typical case IDs that exist in the TestRail test environment test_case_ids = ["24964", "24965"] # Multiple test cases for batch testing - # Add labels to test cases (using single-suite project for batch testing) + # Add labels to test cases add_cases_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{','.join(test_case_ids)}" \\ - --title "e2e-case-label" + --title "{case_label_title}" """) _assert_contains( add_cases_output, [ - f"Adding label 'e2e-case-label' to {len(test_case_ids)} test case(s)...", + f"Adding label '{case_label_title}' to {len(test_case_ids)} test case(s)...", "Successfully processed" ] ) - # List test cases by label title (using single-suite project) + # List test cases by label title list_by_title_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ - --title "e2e-case-label" + --title "{case_label_title}" """) _assert_contains( list_by_title_output, [ - "Retrieving test cases with label title 'e2e-case-label'...", + f"Retrieving test cases with label title '{case_label_title}'...", "matching test case(s):" ] ) - # List test cases by label ID (using single-suite project) + # List test cases by label ID list_by_id_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ @@ -1167,18 +1173,25 @@ def test_labels_cases_no_matching_cases(self): def test_labels_cases_single_case_workflow(self): """Test single case label operations using update_case endpoint""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + single_case_label_title = f"e2e-single-{random_suffix}" + # First, create a test label add_label_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ - --title "e2e-single-case" + --title "{single_case_label_title}" """) _assert_contains( add_label_output, [ - "Adding label 'e2e-single-case'...", + f"Adding label '{single_case_label_title}'...", "Successfully added label:" ] ) @@ -1200,14 +1213,14 @@ def test_labels_cases_single_case_workflow(self): --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{single_case_id}" \\ - --title "e2e-single-case" + --title "{single_case_label_title}" """) _assert_contains( add_single_case_output, [ - f"Adding label 'e2e-single-case' to 1 test case(s)...", + f"Adding label '{single_case_label_title}' to 1 test case(s)...", "Successfully processed 1 case(s):", - f"Successfully added label 'e2e-single-case' to case {single_case_id}" + f"Successfully added label '{single_case_label_title}' to case {single_case_id}" ] ) @@ -1217,12 +1230,12 @@ def test_labels_cases_single_case_workflow(self): -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ - --title "e2e-single-case" + --title "{single_case_label_title}" """) _assert_contains( list_cases_output, [ - 
"Retrieving test cases with label title 'e2e-single-case'...", + f"Retrieving test cases with label title '{single_case_label_title}'...", "Found 1 matching test case(s):", f"Case ID: {single_case_id}" ] @@ -1237,4 +1250,224 @@ def test_labels_cases_single_case_workflow(self): labels delete \\ --ids {label_id} """) + + def test_labels_tests_full_workflow(self): + """Test complete workflow of test label operations""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + test_label_title = f"e2e-test-{random_suffix}" + + # First, create a test label + add_label_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels add \\ + --title "{test_label_title}" + """) + _assert_contains( + add_label_output, + [ + f"Adding label '{test_label_title}'...", + "Successfully added label:" + ] + ) + + # Extract label ID for cleanup + import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) + assert label_id_match, "Could not extract label ID from output" + label_id = label_id_match.group(1) + + try: + # Use known test IDs that should exist in the test project + test_ids = ["266149", "266151"] # Real test IDs for functional testing + + # Test 1: Add labels to tests using --test-ids + add_tests_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --test-ids "{','.join(test_ids)}" \\ + --title "{test_label_title}" + """) + + _assert_contains( + add_tests_output, + [ + f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..." + ] + ) + + # Test 2: Add labels to tests using CSV file + import os + csv_file_path = os.path.join(os.path.dirname(__file__), "sample_csv", "test_ids.csv") + + add_tests_csv_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --test-id-file "{csv_file_path}" \\ + --title "{test_label_title}" + """) + + _assert_contains( + add_tests_csv_output, + [ + "Loaded 2 test ID(s) from file", + f"Adding label '{test_label_title}' to 2 test(s)..." 
+ ] + ) + + # Test 3: List tests by label ID + list_tests_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests list \\ + --ids "{label_id}" + """) + _assert_contains( + list_tests_output, + [ + f"Retrieving tests with label IDs: {label_id}...", + "matching test(s):" + ] + ) + + # Test 4: Get test labels for specific tests + get_test_labels_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests get \\ + --test-id "{','.join(test_ids)}" + """) + _assert_contains( + get_test_labels_output, + [ + f"Retrieving labels for {len(test_ids)} test(s)...", + "Test label information:" + ] + ) + + finally: + # Cleanup - delete the test label + delete_output = _run_cmd(f""" +echo "y" | trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels delete \\ + --ids {label_id} + """) + + def test_labels_tests_validation_errors(self): + """Test validation errors for test label commands""" + import random + import string + + # Generate random suffix to avoid label conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + # Test title too long (21 characters exceeds 20 character limit) + long_title = f"this-is-a-very-long-title-{random_suffix}" # This will be > 20 chars + title_error_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --test-ids "266149" \\ + --title "{long_title}" + """) + assert return_code != 0 + _assert_contains( + title_error_output, + ["Error: Label title must be 20 characters or less."] + ) + + # Test missing test-ids and file + valid_title = f"test-{random_suffix}"[:20] # Ensure valid length + missing_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests add \\ + --title "{valid_title}" + """) + assert return_code != 0 + _assert_contains( + missing_ids_output, + ["Error: Either --test-ids or --test-id-file must be provided."] + ) + + # Test invalid label IDs format in list command + invalid_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + labels tests list \\ + --ids "invalid,ids" + """) + assert return_code != 0 + _assert_contains( + invalid_ids_output, + ["Error: Invalid label IDs format. 
Use comma-separated integers (e.g., 1,2,3)."] + ) + + def test_labels_tests_help_commands(self): + """Test help output for test label commands""" + + # Test main tests help + tests_help_output = _run_cmd("trcli labels tests --help") + _assert_contains( + tests_help_output, + [ + "Usage: trcli labels tests [OPTIONS] COMMAND [ARGS]...", + "Manage labels for tests", + "Commands:", + "add", + "list", + "get" + ] + ) + + # Test tests add help + tests_add_help_output = _run_cmd("trcli labels tests add --help") + _assert_contains( + tests_add_help_output, + [ + "Usage: trcli labels tests add [OPTIONS]", + "Add a label to tests", + "--test-ids", + "--test-id-file", + "--title" + ] + ) + + # Test tests list help + tests_list_help_output = _run_cmd("trcli labels tests list --help") + _assert_contains( + tests_list_help_output, + [ + "Usage: trcli labels tests list [OPTIONS]", + "List tests filtered by label ID", + "--ids" + ] + ) + + # Test tests get help + tests_get_help_output = _run_cmd("trcli labels tests get --help") + _assert_contains( + tests_get_help_output, + [ + "Usage: trcli labels tests get [OPTIONS]", + "Get the labels of tests using test IDs", + "--test-id" + ] + ) \ No newline at end of file From c00129a8f9e2a46183363c2eb10efc31918879df Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 19 Aug 2025 21:57:18 +0800 Subject: [PATCH 18/36] TRCLI-114 added README guide for labels management for tests --- README.md | 219 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 216 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 89be056..cb909fa 100644 --- a/README.md +++ b/README.md @@ -291,11 +291,12 @@ will be used to upload all results into the same test run. The TestRail CLI provides comprehensive label management capabilities using the `labels` command. Labels help categorize and organize your test management assets efficiently, making it easier to filter and manage test cases, runs, and projects. -The TestRail CLI supports two types of label management: +The TestRail CLI supports three types of label management: - **Project Labels**: Manage labels at the project level -- **Test Case Labels**: Apply labels to specific test cases for better organization and filtering +- **Test Case Labels**: Apply labels to specific test cases for better organization and filtering +- **Test Labels**: Apply labels to specific tests (instances of test cases within test runs) for execution management -Both types of labels support full CRUD (Create, Read, Update, Delete) operations with comprehensive validation and error handling. +All types of labels support comprehensive operations with validation and error handling. Project labels support full CRUD operations, while test case and test labels focus on assignment and retrieval operations. ##### Reference ```shell @@ -313,6 +314,7 @@ Commands: delete Delete labels from TestRail get Get a specific label by ID list List all labels in the project + tests Manage labels for tests update Update an existing label in TestRail ``` @@ -674,6 +676,217 @@ Options: - **Label Creation**: Labels are automatically created if they don't exist when adding to cases - **Duplicate Prevention**: Adding an existing label to a case is handled gracefully +#### Test Labels + +The TestRail CLI also supports **test label management** through the `labels tests` command. 
This functionality allows you to assign labels to specific tests (instances of test cases within test runs), providing powerful organization and filtering capabilities for your test execution. + +###### Test Label Features +- **Add labels to tests**: Apply existing or new labels to one or multiple tests +- **CSV file support**: Bulk assign labels using CSV files containing test IDs +- **List tests by labels**: Find tests that have specific labels applied +- **Get test labels**: Retrieve all labels assigned to specific tests +- **Automatic label creation**: Labels are created automatically if they don't exist when adding to tests +- **Maximum label validation**: Enforces TestRail's limit of 10 labels per test +- **Flexible filtering**: Search by label ID for efficient test management + +###### Reference +```shell +$ trcli labels tests --help +Usage: trcli labels tests [OPTIONS] COMMAND [ARGS]... + + Manage labels for tests + +Options: + --help Show this message and exit. + +Commands: + add Add a label to tests + list List tests filtered by label ID + get Get the labels of tests using test IDs +``` + +###### Adding Labels to Tests +Apply labels to one or multiple tests. If the label doesn't exist, it will be created automatically. + +```shell +# Add a label to a single test +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests add --test-ids 123 --title "Regression" + +# Add a label to multiple tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests add --test-ids "123,124,125" --title "Critical" + +# Add a label to tests using CSV file +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests add --test-id-file test_ids.csv --title "Sprint-42" +``` + +**CSV File Format:** +The CSV file should contain test IDs, one per row or comma-separated. Headers are automatically detected and skipped. +```csv +test_id +123 +124 +125 +``` + +Or simple format: +```csv +123,124,125 +``` + +###### Listing Tests by Labels +Find tests that have specific labels applied by label ID. + +```shell +# List tests by label ID +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests list --ids 123 + +# List tests by multiple label IDs +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests list --ids "123,124,125" +``` + +**Output example:** +``` +Retrieving tests with label IDs: 123... +Found 2 matching test(s): + + Test ID: 1001, Title: 'Login functionality test', Status: 1 [Labels: ID:123,Title:'Regression'; ID:124,Title:'Critical'] + Test ID: 1002, Title: 'Password validation test', Status: 2 [Labels: ID:123,Title:'Regression'] +``` + +###### Getting Test Labels +Retrieve all labels assigned to specific tests. + +```shell +# Get labels for a single test +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests get --test-id 123 + +# Get labels for multiple tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + labels tests get --test-id "123,124,125" +``` + +**Output example:** +``` +Retrieving labels for 2 test(s)... 
+Test label information: + + Test ID: 123 + Title: 'Login functionality test' + Status: 1 + Labels (2): + - ID: 5, Title: 'Regression' + - ID: 7, Title: 'Critical' + + Test ID: 124 + Title: 'Password validation test' + Status: 2 + Labels: No labels assigned +``` + +###### Command Options Reference + +**Add Tests Command:** +```shell +$ trcli labels tests add --help +Options: + --test-ids Comma-separated list of test IDs (e.g., 1,2,3) + --test-id-file CSV file containing test IDs + --title Title of the label to add (max 20 characters) [required] + --help Show this message and exit. +``` + +**List Tests Command:** +```shell +$ trcli labels tests list --help +Options: + --ids Comma-separated list of label IDs to filter by [required] + --help Show this message and exit. +``` + +**Get Tests Command:** +```shell +$ trcli labels tests get --help +Options: + --test-id Comma-separated list of test IDs (e.g., 1,2,3) [required] + --help Show this message and exit. +``` + +###### Validation Rules + +**Test Label Management includes these validations:** + +- **Label Title**: Maximum 20 characters (same as project and case labels) +- **Test IDs**: Must be valid integers in comma-separated format +- **Maximum Labels**: Each test can have maximum 10 labels +- **Input Requirements**: Either `--test-ids` or `--test-id-file` must be provided for add command +- **Label Creation**: Labels are automatically created if they don't exist when adding to tests +- **Duplicate Prevention**: Adding an existing label to a test is handled gracefully +- **CSV File Validation**: Invalid entries in CSV files are ignored with warnings + +###### Common Use Cases + +**1. Test Execution Categorization** +```shell +# Label tests by execution type +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels tests add --test-ids "1001,1002,1003" --title "Smoke" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "API Tests" \ + labels tests add --test-ids "1004,1005" --title "Integration" +``` + +**2. Release Management** +```shell +# Label tests for specific releases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Mobile App" \ + labels tests add --test-ids "2001,2002,2003" --title "Release-2.0" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Mobile App" \ + labels tests add --test-id-file hotfix_tests.csv --title "Hotfix-2.1.3" +``` + +**3. Priority and Risk Assessment** +```shell +# Label tests by priority +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "E-Commerce" \ + labels tests add --test-ids "3001,3002" --title "P0-Critical" + +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "E-Commerce" \ + labels tests add --test-ids "3003,3004,3005" --title "P1-High" +``` + +**4. 
Test Analysis and Reporting** +```shell +# Find all regression tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Web App" \ + labels tests list --ids 5 + +# Get detailed label information for failed tests +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Web App" \ + labels tests get --test-id "4001,4002,4003" +``` + ### Reference ```shell $ trcli add_run --help From 99cdd3d9baf597ecc1221ae4fa76cc1f0749c5c9 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 25 Aug 2025 15:23:19 +0800 Subject: [PATCH 19/36] TRCLI-90 Added changes to implement references management for test runs --- trcli/api/api_request_handler.py | 49 ++++++++++++++++++++++++++++++- trcli/api/project_based_client.py | 8 ++++- trcli/commands/cmd_add_run.py | 30 +++++++++++++++++-- 3 files changed, 83 insertions(+), 4 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index a7d382c..9968208 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -448,20 +448,33 @@ def add_run( return run_id, response.error_message def update_run(self, run_id: int, run_name: str, start_date: str = None, - end_date: str = None, milestone_id: int = None) -> Tuple[dict, str]: + end_date: str = None, milestone_id: int = None, refs: str = None, refs_action: str = 'add') -> Tuple[dict, str]: """ Updates an existing run :run_id: run id :run_name: run name + :refs: references to manage + :refs_action: action to perform ('add', 'update', 'delete') :returns: Tuple with run and error string. """ run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.error_message: + return None, run_response.error_message + existing_description = run_response.response_text.get("description", "") + existing_refs = run_response.response_text.get("refs", "") add_run_data = self.data_provider.add_run(run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id) add_run_data["description"] = existing_description # Retain the current description + # Handle references based on action + if refs is not None: + updated_refs = self._manage_references(existing_refs, refs, refs_action) + add_run_data["refs"] = updated_refs + else: + add_run_data["refs"] = existing_refs # Keep existing refs if none provided + run_tests, error_message = self.__get_all_tests_in_run(run_id) run_case_ids = [test["case_id"] for test in run_tests] report_case_ids = add_run_data["case_ids"] @@ -489,6 +502,40 @@ def update_run(self, run_id: int, run_name: str, start_date: str = None, run_response = self.client.send_get(f"get_run/{run_id}") return run_response.response_text, update_response.error_message + def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str: + """ + Manage references based on the specified action. 
+ :existing_refs: current references in the run + :new_refs: new references to process + :action: 'add', 'update', or 'delete' + :returns: updated references string + """ + if not existing_refs: + existing_refs = "" + + if action == 'update': + # Replace all references with new ones + return new_refs + elif action == 'delete': + if not new_refs: + # Delete all references + return "" + else: + # Delete specific references + existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] + refs_to_delete = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + updated_list = [ref for ref in existing_list if ref not in refs_to_delete] + return ','.join(updated_list) + else: # action == 'add' (default) + # Add new references to existing ones + if not existing_refs: + return new_refs + existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] + new_list = [ref.strip() for ref in new_refs.split(',') if ref.strip()] + # Avoid duplicates + combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] + return ','.join(combined_list) + def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): """ Getting test result id and upload attachments for it. """ tests_in_run, error = self.__get_all_tests_in_run(run_id) diff --git a/trcli/api/project_based_client.py b/trcli/api/project_based_client.py index 1bf2737..fef7835 100644 --- a/trcli/api/project_based_client.py +++ b/trcli/api/project_based_client.py @@ -228,7 +228,13 @@ def create_or_update_test_run(self) -> Tuple[int, str]: self.environment.log(f"Updating test run. ", new_line=False) run_id = self.environment.run_id run, error_message = self.api_request_handler.update_run( - run_id, self.run_name, self.environment.milestone_id + run_id, + self.run_name, + start_date=self.environment.run_start_date, + end_date=self.environment.run_end_date, + milestone_id=self.environment.milestone_id, + refs=self.environment.run_refs, + refs_action=getattr(self.environment, 'run_refs_action', 'add') ) if self.environment.auto_close_run: self.environment.log("Closing run. ", new_line=False) diff --git a/trcli/commands/cmd_add_run.py b/trcli/commands/cmd_add_run.py index b2d270b..2a90f10 100644 --- a/trcli/commands/cmd_add_run.py +++ b/trcli/commands/cmd_add_run.py @@ -19,7 +19,8 @@ def print_config(env: Environment): f"\n> Assigned To ID: {env.run_assigned_to_id}" f"\n> Include All: {env.run_include_all}" f"\n> Case IDs: {env.run_case_ids}" - f"\n> Refs: {env.run_refs}") + f"\n> Refs: {env.run_refs}" + f"\n> Refs Action: {env.run_refs_action if hasattr(env, 'run_refs_action') else 'add'}") def write_run_to_file(environment: Environment, run_id: int): @@ -43,6 +44,12 @@ def write_run_to_file(environment: Environment, run_id: int): @click.command(context_settings=CONTEXT_SETTINGS) @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.") +@click.option( + "--run-id", + type=click.IntRange(min=1), + metavar="", + help="ID of existing test run to update. 
If not provided, a new run will be created.", +) @click.option( "--suite-id", type=click.IntRange(min=1), @@ -97,7 +104,14 @@ def write_run_to_file(environment: Environment, run_id: int): @click.option( "--run-refs", metavar="", - help="A comma-separated list of references/requirements" + help="A comma-separated list of references/requirements (up to 2000 characters)" +) +@click.option( + "--run-refs-action", + type=click.Choice(['add', 'update', 'delete'], case_sensitive=False), + default='add', + metavar="", + help="Action to perform on references: 'add' (default), 'update' (replace all), or 'delete' (remove all or specific)" ) @click.option("-f", "--file", type=click.Path(), metavar="", help="Write run data to file.") @click.pass_context @@ -107,6 +121,18 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.cmd = "add_run" environment.set_parameters(context) environment.check_for_required_parameters() + + if environment.run_refs and len(environment.run_refs) > 2000: + environment.elog("Error: References field cannot exceed 2000 characters.") + exit(1) + + if environment.run_refs_action and environment.run_refs_action != 'add' and not environment.run_id: + environment.elog("Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required).") + exit(1) + + if environment.run_refs_action == 'delete' and not environment.run_refs and environment.run_id: + environment.run_refs = "" + print_config(environment) project_client = ProjectBasedClient( From 3e0da883494faaef9a91e15015cdab70a3afe40e Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 25 Aug 2025 15:24:05 +0800 Subject: [PATCH 20/36] TRCLI-90 Added unit and functional tests for references management for test runs --- tests/test_cmd_add_run.py | 127 ++++++++++++++++++++++++++ tests_e2e/test_end2end.py | 184 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 311 insertions(+) diff --git a/tests/test_cmd_add_run.py b/tests/test_cmd_add_run.py index 5e23430..3babe7a 100644 --- a/tests/test_cmd_add_run.py +++ b/tests/test_cmd_add_run.py @@ -1,4 +1,6 @@ from unittest import mock +import pytest +from click.testing import CliRunner from trcli.cli import Environment from trcli.commands import cmd_add_run @@ -46,3 +48,128 @@ def test_write_run_to_file_with_refs_and_description(self, mock_open_file): cmd_add_run.write_run_to_file(environment, run_id) mock_open_file.assert_called_with(file, "a") mock_open_file.return_value.__enter__().write.assert_called_once_with(expected_string) + + def test_cli_validation_refs_too_long(self): + """Test that references validation fails when exceeding 2000 characters""" + from trcli.cli import Environment + + environment = Environment() + environment.run_refs = "A" * 2001 # 2001 characters, exceeds limit + + assert len(environment.run_refs) > 2000 + + runner = CliRunner() + long_refs = "A" * 2001 + + result = runner.invoke(cmd_add_run.cli, [ + '--title', 'Test Run', + '--run-refs', long_refs + ], catch_exceptions=False) + + # Should exit with error code 1 due to missing required parameters or validation + assert result.exit_code == 1 + + def test_validation_logic_refs_action_without_run_id(self): + """Test validation logic for refs action without run_id""" + from trcli.cli import Environment + + # Update action validation + environment = Environment() + environment.run_refs_action = "update" + environment.run_id = None + environment.run_refs = "JIRA-123" + + # This should be invalid + assert 
environment.run_refs_action == "update" + assert environment.run_id is None + + # Delete action validation + environment.run_refs_action = "delete" + assert environment.run_refs_action == "delete" + assert environment.run_id is None + + def test_refs_action_parameter_parsing(self): + """Test that refs action parameter is parsed correctly""" + runner = CliRunner() + + # Test that the CLI accepts new param without crashing! :) - acuanico + result = runner.invoke(cmd_add_run.cli, ['--help']) + assert result.exit_code == 0 + assert "--run-refs-action" in result.output + assert "Action to perform on references" in result.output + + +class TestApiRequestHandlerReferences: + """Test class for reference management functionality""" + + def test_manage_references_add(self): + """Test adding references to existing ones""" + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.cli import Environment + from trcli.api.api_client import APIClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + environment = Environment() + api_client = APIClient("https://test.testrail.com") + suite = TestRailSuite(name="Test Suite") + handler = ApiRequestHandler(environment, api_client, suite) + + # Adding new references + result = handler._manage_references("JIRA-100,JIRA-200", "JIRA-300,JIRA-400", "add") + assert result == "JIRA-100,JIRA-200,JIRA-300,JIRA-400" + + # Adding duplicate references (should not duplicate) + result = handler._manage_references("JIRA-100,JIRA-200", "JIRA-200,JIRA-300", "add") + assert result == "JIRA-100,JIRA-200,JIRA-300" + + # Adding to empty existing references + result = handler._manage_references("", "JIRA-100,JIRA-200", "add") + assert result == "JIRA-100,JIRA-200" + + def test_manage_references_update(self): + """Test updating (replacing) all references""" + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.cli import Environment + from trcli.api.api_client import APIClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + environment = Environment() + api_client = APIClient("https://test.testrail.com") + suite = TestRailSuite(name="Test Suite") + handler = ApiRequestHandler(environment, api_client, suite) + + # Test replacing all references + result = handler._manage_references("JIRA-100,JIRA-200", "JIRA-300,JIRA-400", "update") + assert result == "JIRA-300,JIRA-400" + + # Test replacing with empty references + result = handler._manage_references("JIRA-100,JIRA-200", "", "update") + assert result == "" + + def test_manage_references_delete(self): + """Test deleting specific or all references""" + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.cli import Environment + from trcli.api.api_client import APIClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + environment = Environment() + api_client = APIClient("https://test.testrail.com") + suite = TestRailSuite(name="Test Suite") + handler = ApiRequestHandler(environment, api_client, suite) + + # Deleting specific references + result = handler._manage_references("JIRA-100,JIRA-200,JIRA-300", "JIRA-200", "delete") + assert result == "JIRA-100,JIRA-300" + + # Deleting multiple specific references + result = handler._manage_references("JIRA-100,JIRA-200,JIRA-300,JIRA-400", "JIRA-200,JIRA-400", "delete") + assert result == "JIRA-100,JIRA-300" + + # Deleting all references (empty new_refs) + result = handler._manage_references("JIRA-100,JIRA-200", "", "delete") + assert result == "" + + # Deleting non-existent 
references + result = handler._manage_references("JIRA-100,JIRA-200", "JIRA-999", "delete") + assert result == "JIRA-100,JIRA-200" diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 89c0b88..93018fb 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -493,6 +493,190 @@ def test_cli_add_run_and_plan_with_due_date(self): ] ) + def test_cli_add_run_refs_with_references(self): + """Test creating a run with references""" + import random + import string + + # Generate random suffix to avoid conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}" \\ + --run-refs "JIRA-100,JIRA-200,REQ-{random_suffix}" \\ + -f "run_config_refs.yml" + """) + _assert_contains( + output, + [ + "Creating test run.", + f"Test run: {self.TR_INSTANCE}index.php?/runs/view", + f"title: [CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}", + f"Refs: JIRA-100,JIRA-200,REQ-{random_suffix}", + "Writing test run data to file (run_config_refs.yml). Done." + ] + ) + + def test_cli_add_run_refs_validation_error(self): + """Test references validation (too long)""" + long_refs = "A" * 2001 # Exceeds 2000 character limit + + output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Too Long" \\ + --run-refs "{long_refs}" + """) + + assert return_code != 0 + _assert_contains( + output, + ["Error: References field cannot exceed 2000 characters."] + ) + + def test_cli_add_run_refs_update_action_validation(self): + """Test that update/delete actions require run_id""" + output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: Invalid Action" \\ + --run-refs "JIRA-123" \\ + --run-refs-action "update" + """) + + assert return_code != 0 + _assert_contains( + output, + ["Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)."] + ) + + def test_cli_add_run_refs_update_workflow(self): + """Test complete workflow: create run, then update references""" + import random + import string + import re + + # Generate random suffix to avoid conflicts + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + # Step 1: Create a run with initial references + create_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ + --run-refs "JIRA-100,JIRA-200" \\ + -f "run_config_workflow.yml" + """) + + # Extract run ID from output + run_id_match = re.search(r'run_id: (\d+)', create_output) + assert run_id_match, "Could not extract run ID from output" + run_id = run_id_match.group(1) + + _assert_contains( + create_output, + [ + "Creating test run.", + f"run_id: {run_id}", + "Refs: JIRA-100,JIRA-200" + ] + ) + + # Step 2: Add more references to the existing run + add_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --run-id {run_id} \\ + --title "[CLI-E2E-Tests] ADD 
RUN TEST: Refs Workflow {random_suffix}" \\ + --run-refs "JIRA-300,REQ-{random_suffix}" \\ + --run-refs-action "add" + """) + + _assert_contains( + add_output, + [ + "Updating test run.", + f"run_id: {run_id}", + "Refs Action: add" + ] + ) + + # Step 3: Update (replace) all references + update_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --run-id {run_id} \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ + --run-refs "NEW-100,NEW-200" \\ + --run-refs-action "update" + """) + + _assert_contains( + update_output, + [ + "Updating test run.", + f"run_id: {run_id}", + "Refs: NEW-100,NEW-200", + "Refs Action: update" + ] + ) + + # Step 4: Delete specific references + delete_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --run-id {run_id} \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ + --run-refs "NEW-100" \\ + --run-refs-action "delete" + """) + + _assert_contains( + delete_output, + [ + "Updating test run.", + f"run_id: {run_id}", + "Refs Action: delete" + ] + ) + + # Step 5: Delete all references + delete_all_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + add_run \\ + --run-id {run_id} \\ + --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ + --run-refs-action "delete" + """) + + _assert_contains( + delete_all_output, + [ + "Updating test run.", + f"run_id: {run_id}", + "Refs: ", + "Refs Action: delete" + ] + ) + def bug_test_cli_robot_description_bug(self): output = _run_cmd(f""" From 6b64a43afcc95b1a93a65d4038df5e0809b712c6 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 25 Aug 2025 15:24:37 +0800 Subject: [PATCH 21/36] TRCLI-90 Added README updates for references managements for test runs --- README.md | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cb909fa..84d70f2 100644 --- a/README.md +++ b/README.md @@ -897,6 +897,8 @@ Usage: trcli add_run [OPTIONS] Options: --title Title of Test Run to be created or updated in TestRail. + --run-id ID of existing test run to update. If not provided, + a new run will be created. [x>=1] --suite-id Suite ID to submit results to. [x>=1] --run-description Summary text to be added to the test run. --milestone-id Milestone ID to which the Test Run should be @@ -909,7 +911,9 @@ Options: --auto-close-run Use this option to automatically close the created run. --run-case-ids Comma separated list of test case IDs to include in the test run (i.e.: 1,2,3,4). - --run-refs A comma-separated list of references/requirements + --run-refs A comma-separated list of references/requirements (up to 2000 characters) + --run-refs-action Action to perform on references: 'add' (default), 'update' (replace all), + or 'delete' (remove all or specific) -f, --file Write run title and id to file. --help Show this message and exit. ``` @@ -922,6 +926,84 @@ run_id: 1 This file can be used as the config file (or appended to an existing config file) in a later run. +### Managing References in Test Runs + +The `add_run` command supports comprehensive reference management for test runs. References are stored in TestRail's "References" field and can contain up to 2000 characters. 
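+
+Each `--run-refs-action` transforms the run's existing references value as illustrated below; the examples mirror the behavior covered by the reference management unit tests in this change:
+
+```
+Existing refs:  JIRA-100,JIRA-200
+
+add     JIRA-200,JIRA-300   ->  JIRA-100,JIRA-200,JIRA-300   (duplicates are skipped)
+update  NEW-100,NEW-200     ->  NEW-100,NEW-200              (replaces all references)
+delete  JIRA-100            ->  JIRA-200                     (removes only the listed references)
+delete  (no --run-refs)     ->  (empty)                      (clears all references)
+```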
+ +#### Adding References to New Runs + +When creating a new test run, you can add references using the `--run-refs` option: + +```bash +trcli -y -h https://example.testrail.io/ --project "My Project" \ + add_run --title "My Test Run" --run-refs "JIRA-100,JIRA-200,REQ-001" +``` + +#### Managing References in Existing Runs + +For existing test runs, you can use the `--run-refs-action` option to specify how references should be handled: + +**Add References (default behavior):** +```bash +trcli -y -h https://example.testrail.io/ --project "My Project" \ + add_run --run-id 123 --title "My Test Run" \ + --run-refs "JIRA-300,JIRA-400" --run-refs-action "add" +``` + +**Update (Replace) All References:** +```bash +trcli -y -h https://example.testrail.io/ --project "My Project" \ + add_run --run-id 123 --title "My Test Run" \ + --run-refs "NEW-100,NEW-200" --run-refs-action "update" +``` + +**Delete Specific References:** +```bash +trcli -y -h https://example.testrail.io/ --project "My Project" \ + add_run --run-id 123 --title "My Test Run" \ + --run-refs "JIRA-100,JIRA-200" --run-refs-action "delete" +``` + +**Delete All References:** +```bash +trcli -y -h https://example.testrail.io/ --project "My Project" \ + add_run --run-id 123 --title "My Test Run" \ + --run-refs-action "delete" +``` + +#### Reference Management Rules + +- **Character Limit**: References field supports up to 2000 characters +- **Format**: Comma-separated list of reference IDs +- **Duplicate Prevention**: When adding references, duplicates are automatically prevented +- **Action Requirements**: `update` and `delete` actions require an existing run (--run-id must be provided) +- **Validation**: Invalid reference formats are rejected with clear error messages + +#### Examples + +**Complete Workflow Example:** +```bash +# 1. Create run with initial references +trcli -y -h https://example.testrail.io/ <--username and --password or --key> --project "My Project" \ + add_run --title "Sprint 1 Tests" --run-refs "JIRA-100,JIRA-200" -f "run_config.yml" + +# 2. Add more references (from the config file) +trcli -y -h https://example.testrail.io/ <--username and --password or --key> --project "My Project" \ + -c run_config.yml add_run --run-refs "JIRA-300,REQ-001" --run-refs-action "add" + +# 3. Replace all references with new ones +trcli -y -h https://example.testrail.io/ <--username and --password or --key> --project "My Project" \ + -c run_config.yml add_run --run-refs "FINAL-100,FINAL-200" --run-refs-action "update" + +# 4. Remove specific references +trcli -y -h https://example.testrail.io/ <--username and --password or --key> --project "My Project" \ + -c run_config.yml add_run --run-refs "FINAL-100" --run-refs-action "delete" + +# 5. 
Clear all references +trcli -y -h https://example.testrail.io/ <--username and --password or --key> --project "My Project" \ + -c run_config.yml add_run --run-refs-action "delete" +``` + Generating test cases from OpenAPI specs ----------------- From 62f1290076a546789c97423fa4ad2b3ea8d9c341 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 28 Aug 2025 14:40:14 +0800 Subject: [PATCH 22/36] TRCLI-114 Allow multiple labels, added new validations and updated tests --- tests/test_cmd_labels.py | 128 +++++++++++++++++++++++++- trcli/api/api_request_handler.py | 152 ++++++++++++++++++++----------- trcli/commands/cmd_labels.py | 100 +++++++++++++------- 3 files changed, 287 insertions(+), 93 deletions(-) diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py index 108b1ba..1a37536 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -669,7 +669,7 @@ def test_add_label_to_tests_success(self, mock_project_client): assert result.exit_code == 0 mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( - test_ids=[1], title='Test Label', project_id=1 + test_ids=[1], titles=['Test Label'], project_id=1 ) mock_log.assert_any_call("Successfully processed 1 test(s):") @@ -709,7 +709,7 @@ def test_add_label_to_tests_with_csv_file(self, mock_project_client): assert result.exit_code == 0 mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( - test_ids=[1, 2], title='Test Label', project_id=1 + test_ids=[1, 2], titles=['Test Label'], project_id=1 ) mock_log.assert_any_call("Loaded 2 test ID(s) from file 'test_ids.csv'") @@ -883,5 +883,125 @@ def test_add_label_to_tests_with_warnings(self, mock_project_client): mock_log.assert_any_call(" Test ID 999 does not exist or is not accessible") mock_log.assert_any_call("Warning: 1 test(s) already have maximum labels (10):") mock_log.assert_any_call(" Test 2: Maximum labels reached") - - \ No newline at end of file + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_multiple_labels_to_tests_success(self, mock_project_client): + """Test successful addition of multiple labels to tests""" + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [ + {'test_id': 1, 'message': 'Successfully added 2 labels (label1, label2) to test 1'}, + {'test_id': 2, 'message': 'Successfully added 2 labels (label1, label2) to test 2'} + ], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1,2', '--title', 'label1,label2'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1, 2], titles=['label1', 'label2'], project_id=1 + ) + mock_log.assert_any_call("Successfully processed 2 test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_tests_mixed_valid_invalid(self, mock_project_client): + """Test mixed valid/invalid labels - should process valid ones and warn about invalid ones""" + 
mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + mock_client_instance.api_request_handler.add_labels_to_tests.return_value = ( + { + 'successful_tests': [ + {'test_id': 1, 'message': "Successfully added label 'valid-label' to test 1"} + ], + 'failed_tests': [], + 'max_labels_reached': [], + 'test_not_found': [] + }, + "" + ) + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', 'valid-label,this-title-is-way-too-long-for-testrail'], + obj=self.environment + ) + + # Should succeed with valid label + assert result.exit_code == 0 + + # Should warn about invalid label + mock_elog.assert_any_call("Warning: Label title 'this-title-is-way-too-long-for-testrail' exceeds 20 character limit and will be skipped.") + + # Should process the valid label + mock_client_instance.api_request_handler.add_labels_to_tests.assert_called_once_with( + test_ids=[1], titles=['valid-label'], project_id=1 + ) + + # Should show success for valid label + mock_log.assert_any_call("Successfully processed 1 test(s):") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_tests_all_invalid_titles(self, mock_project_client): + """Test when all labels are invalid - should fail""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', 'this-title-is-way-too-long,another-title-that-is-also-too-long'], + obj=self.environment + ) + + # Should fail when all labels are invalid + assert result.exit_code == 1 + + # Should show warnings for all invalid labels + mock_elog.assert_any_call("Warning: Label title 'this-title-is-way-too-long' exceeds 20 character limit and will be skipped.") + mock_elog.assert_any_call("Warning: Label title 'another-title-that-is-also-too-long' exceeds 20 character limit and will be skipped.") + mock_elog.assert_any_call("Error: No valid label titles provided after filtering.") + + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_labels_to_tests_max_labels_validation(self, mock_project_client): + """Test early validation for more than 10 labels""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + # Create a title string with 11 labels + long_title_list = ','.join([f'label{i}' for i in range(1, 12)]) + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1', '--title', long_title_list], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_called_with("Error: Cannot add more than 10 labels at once. 
You provided 11 valid labels.") + + \ No newline at end of file diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index a7d382c..f906212 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1046,12 +1046,12 @@ def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: L return matching_cases, "" - def add_labels_to_tests(self, test_ids: List[int], title: str, project_id: int) -> Tuple[dict, str]: + def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: """ - Add a label to multiple tests + Add labels to multiple tests :param test_ids: List of test IDs - :param title: Label title (max 20 characters) + :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) :param project_id: Project ID for validation :returns: Tuple with response data and error string """ @@ -1063,6 +1063,19 @@ def add_labels_to_tests(self, test_ids: List[int], title: str, project_id: int) 'test_not_found': [] } + # Normalize titles to a list + if isinstance(titles, str): + title_list = [titles] + else: + title_list = titles + + # At this point, title_list should already be validated by the CLI + # Just ensure we have clean titles + title_list = [title.strip() for title in title_list if title.strip()] + + if not title_list: + return {}, "No valid labels provided" + # Validate test IDs by getting run information for each test valid_test_ids = [] for test_id in test_ids: @@ -1092,25 +1105,33 @@ def add_labels_to_tests(self, test_ids: List[int], title: str, project_id: int) if not valid_test_ids: return results, "" - # Check if label exists or create it + # Check if labels exist or create them existing_labels, error_message = self.get_labels(project_id) if error_message: return results, error_message - - # Find existing label with the same title - label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') - break - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, title) - if error_message: - return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') + # Process each title to get/create label IDs + label_ids = [] + label_id_to_title = {} # Map label IDs to their titles + for title in title_list: + # Find existing label with the same title + label_id = None + for label in existing_labels.get('labels', []): + if label.get('title') == title: + label_id = label.get('id') + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get('label', label_data) + label_id = label_info.get('id') + + if label_id: + label_ids.append(label_id) + label_id_to_title[label_id] = title # Collect test data and validate constraints tests_to_update = [] @@ -1126,26 +1147,43 @@ def add_labels_to_tests(self, test_ids: List[int], title: str, project_id: int) test_data = test_response.response_text current_labels = test_data.get('labels', []) + current_label_ids = [label.get('id') for label in current_labels if label.get('id')] - # Check if label already exists on this test - if any(label.get('id') == label_id for label in current_labels): + new_label_ids = [] + already_exists_titles = [] + + for label_id in label_ids: + if 
label_id not in current_label_ids: + new_label_ids.append(label_id) + else: + if label_id in label_id_to_title: + already_exists_titles.append(label_id_to_title[label_id]) + + if not new_label_ids: results['successful_tests'].append({ 'test_id': test_id, - 'message': f"Label '{title}' already exists on test {test_id}" + 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" }) continue # Check maximum labels limit (10) - if len(current_labels) >= 10: + if len(current_label_ids) + len(new_label_ids) > 10: results['max_labels_reached'].append(test_id) continue # Prepare test for update - existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] - updated_label_ids = existing_label_ids + [label_id] + updated_label_ids = current_label_ids + new_label_ids + + new_label_titles = [] + for label_id in new_label_ids: + if label_id in label_id_to_title: + new_label_titles.append(label_id_to_title[label_id]) + tests_to_update.append({ 'test_id': test_id, - 'labels': updated_label_ids + 'labels': updated_label_ids, + 'new_labels': new_label_ids, + 'new_label_titles': new_label_titles }) # Update tests using appropriate endpoint @@ -1157,47 +1195,51 @@ def add_labels_to_tests(self, test_ids: List[int], title: str, project_id: int) update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) if update_response.status_code == 200: + new_label_titles = test_info.get('new_label_titles', []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + results['successful_tests'].append({ 'test_id': test_info['test_id'], - 'message': f"Successfully added label '{title}' to test {test_info['test_id']}" + 'message': message }) else: results['failed_tests'].append({ 'test_id': test_info['test_id'], 'error': update_response.error_message }) - elif len(tests_to_update) > 1: - # Multiple tests: use update_tests endpoint with comma-separated test IDs - test_ids_str = ",".join(str(test_info['test_id']) for test_info in tests_to_update) - batch_update_data = { - 'test_ids': [test_info['test_id'] for test_info in tests_to_update], - 'labels': tests_to_update[0]['labels'] # Assuming same labels for all tests - } - - batch_response = self.client.send_post(f"update_tests/{test_ids_str}", payload=batch_update_data) - - if batch_response.status_code == 200: - for test_info in tests_to_update: + else: + # Multiple tests: use individual updates to ensure each test gets its specific labels + for test_info in tests_to_update: + test_update_data = {'labels': test_info['labels']} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get('new_label_titles', []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + 
results['successful_tests'].append({ 'test_id': test_info['test_id'], - 'message': f"Successfully added label '{title}' to test {test_info['test_id']}" + 'message': message + }) + else: + results['failed_tests'].append({ + 'test_id': test_info['test_id'], + 'error': update_response.error_message }) - else: - # If batch update fails, fall back to individual updates - for test_info in tests_to_update: - test_update_data = {'labels': test_info['labels']} - update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - - if update_response.status_code == 200: - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': f"Successfully added label '{title}' to test {test_info['test_id']}" - }) - else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) return results, "" diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index 0095059..376d32a 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -383,18 +383,45 @@ def tests(environment: Environment, context: click.Context, *args, **kwargs): @tests.command(name='add') @click.option("--test-ids", metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") @click.option("--test-id-file", metavar="", help="CSV file containing test IDs.") -@click.option("--title", required=True, metavar="", help="Title of the label to add (max 20 characters).") +@click.option("--title", required=True, metavar="", help="Label title(s) to add (max 20 characters each). Use comma separation for multiple labels (e.g., 'label1,label2').") @click.pass_context @pass_environment def add_to_tests(environment: Environment, context: click.Context, test_ids: str, test_id_file: str, title: str, *args, **kwargs): - """Add a label to tests""" + """Add label(s) to tests""" environment.check_for_required_parameters() print_config(environment, "Add Tests") - if len(title) > 20: - environment.elog("Error: Label title must be 20 characters or less.") + # Parse comma-separated titles + title_list = [t.strip() for t in title.split(",") if t.strip()] + + # Filter valid and invalid labels + valid_titles = [] + invalid_titles = [] + + for t in title_list: + if len(t) > 20: + invalid_titles.append(t) + else: + valid_titles.append(t) + + # Show warnings for invalid labels but continue with valid ones + if invalid_titles: + for invalid_title in invalid_titles: + environment.elog(f"Warning: Label title '{invalid_title}' exceeds 20 character limit and will be skipped.") + + # Check if we have any valid labels left + if not valid_titles: + environment.elog("Error: No valid label titles provided after filtering.") exit(1) + # Validate maximum number of valid labels (TestRail limit is 10 labels per test) + if len(valid_titles) > 10: + environment.elog(f"Error: Cannot add more than 10 labels at once. 
You provided {len(valid_titles)} valid labels.") + exit(1) + + # Use only valid titles for processing + title_list = valid_titles + # Validate that either test_ids or test_id_file is provided if not test_ids and not test_id_file: environment.elog("Error: Either --test-ids or --test-id-file must be provided.") @@ -499,43 +526,48 @@ def add_to_tests(environment: Environment, context: click.Context, test_ids: str ) project_client.resolve_project() - environment.log(f"Adding label '{title}' to {len(test_id_list)} test(s)...") + # Log message adjusted for single/multiple labels + if len(title_list) == 1: + environment.log(f"Adding label '{title_list[0]}' to {len(test_id_list)} test(s)...") + else: + environment.log(f"Adding {len(title_list)} labels ({', '.join(title_list)}) to {len(test_id_list)} test(s)...") results, error_message = project_client.api_request_handler.add_labels_to_tests( test_ids=test_id_list, - title=title, + titles=title_list, project_id=project_client.project.project_id ) + # Handle validation errors (warnings, not fatal) if error_message: - environment.elog(f"Failed to add labels to tests: {error_message}") - exit(1) - else: - # Report results - successful_tests = results.get('successful_tests', []) - failed_tests = results.get('failed_tests', []) - max_labels_reached = results.get('max_labels_reached', []) - test_not_found = results.get('test_not_found', []) - - if test_not_found: - environment.log(f"Warning: {len(test_not_found)} test(s) not found or not accessible:") - for test_id in test_not_found: - environment.log(f" Test ID {test_id} does not exist or is not accessible") - - if successful_tests: - environment.log(f"Successfully processed {len(successful_tests)} test(s):") - for test_result in successful_tests: - environment.log(f" Test {test_result['test_id']}: {test_result['message']}") - - if max_labels_reached: - environment.log(f"Warning: {len(max_labels_reached)} test(s) already have maximum labels (10):") - for test_id in max_labels_reached: - environment.log(f" Test {test_id}: Maximum labels reached") - - if failed_tests: - environment.log(f"Failed to process {len(failed_tests)} test(s):") - for test_result in failed_tests: - environment.log(f" Test {test_result['test_id']}: {test_result['error']}") + environment.elog(f"Warning: {error_message}") + + # Process results + # Report results + successful_tests = results.get('successful_tests', []) + failed_tests = results.get('failed_tests', []) + max_labels_reached = results.get('max_labels_reached', []) + test_not_found = results.get('test_not_found', []) + + if test_not_found: + environment.log(f"Warning: {len(test_not_found)} test(s) not found or not accessible:") + for test_id in test_not_found: + environment.log(f" Test ID {test_id} does not exist or is not accessible") + + if successful_tests: + environment.log(f"Successfully processed {len(successful_tests)} test(s):") + for test_result in successful_tests: + environment.log(f" Test {test_result['test_id']}: {test_result['message']}") + + if max_labels_reached: + environment.log(f"Warning: {len(max_labels_reached)} test(s) already have maximum labels (10):") + for test_id in max_labels_reached: + environment.log(f" Test {test_id}: Maximum labels reached") + + if failed_tests: + environment.log(f"Failed to process {len(failed_tests)} test(s):") + for test_result in failed_tests: + environment.log(f" Test {test_result['test_id']}: {test_result['error']}") @tests.command(name='list') From e55f5a294269786cba6db24134f327914dac59c3 Mon Sep 17 00:00:00 2001 From: 
acuanico-tr-galt Date: Mon, 1 Sep 2025 15:26:56 +0800 Subject: [PATCH 23/36] TRCLI-114 updated parameter name for labels tests --test-id to --test-ids for consistency, also updated readme guide and affected tests --- README.md | 6 +++--- tests/test_api_request_handler_labels.py | 8 ++++---- tests/test_cmd_labels.py | 8 +++++--- tests_e2e/test_end2end.py | 2 +- trcli/commands/cmd_labels.py | 6 +++--- 5 files changed, 16 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index cb909fa..cd7b2dd 100644 --- a/README.md +++ b/README.md @@ -770,12 +770,12 @@ Retrieve all labels assigned to specific tests. # Get labels for a single test $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - labels tests get --test-id 123 + labels tests get --test-ids 123 # Get labels for multiple tests $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - labels tests get --test-id "123,124,125" + labels tests get --test-ids "123,124,125" ``` **Output example:** @@ -884,7 +884,7 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor # Get detailed label information for failed tests $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Web App" \ - labels tests get --test-id "4001,4002,4003" + labels tests get --test-ids "4001,4002,4003" ``` ### Reference diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index 203f197..abe2d17 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -767,7 +767,7 @@ def test_add_labels_to_tests_success_single(self, labels_handler): result, error = labels_handler.add_labels_to_tests( test_ids=[1], - title="Test Label", + titles="Test Label", project_id=1 ) @@ -789,7 +789,7 @@ def test_add_labels_to_tests_test_not_found(self, labels_handler): with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): result, error = labels_handler.add_labels_to_tests( test_ids=[999], - title="Test Label", + titles="Test Label", project_id=1 ) @@ -844,7 +844,7 @@ def test_add_labels_to_tests_max_labels_reached(self, labels_handler): result, error = labels_handler.add_labels_to_tests( test_ids=[1], - title="New Label", + titles="New Label", project_id=1 ) @@ -1017,7 +1017,7 @@ def test_add_labels_to_tests_batch_update(self, labels_handler): result, error = labels_handler.add_labels_to_tests( test_ids=[1, 2], - title="Test Label", + titles="Test Label", project_id=1 ) diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py index 1a37536..08d61f7 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -731,7 +731,7 @@ def test_add_label_to_tests_validation_error(self, mock_project_client): @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') def test_add_label_to_tests_title_too_long(self, mock_project_client): - """Test validation error for title too long""" + """Test validation error for title too long - should fail when all labels are invalid""" long_title = "a" * 21 # 21 characters, exceeds limit with patch.object(self.environment, 'elog') as mock_elog, \ @@ -745,7 +745,9 @@ def test_add_label_to_tests_title_too_long(self, mock_project_client): ) assert result.exit_code == 1 - mock_elog.assert_any_call("Error: Label title must be 20 characters or less.") + # Should show warning for invalid label, then error for no valid labels + mock_elog.assert_any_call(f"Warning: Label title '{long_title}' exceeds 20 character 
limit and will be skipped.") + mock_elog.assert_any_call("Error: No valid label titles provided after filtering.") @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') def test_list_tests_by_label_success(self, mock_project_client): @@ -808,7 +810,7 @@ def test_get_test_labels_success(self, mock_project_client): result = self.runner.invoke( cmd_labels.tests, - ['get', '--test-id', '1'], + ['get', '--test-ids', '1'], obj=self.environment ) diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 89c0b88..3976af2 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1346,7 +1346,7 @@ def test_labels_tests_full_workflow(self): -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests get \\ - --test-id "{','.join(test_ids)}" + --test-ids "{','.join(test_ids)}" """) _assert_contains( get_test_labels_output, diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index 376d32a..4a71e18 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -620,16 +620,16 @@ def list_tests(environment: Environment, context: click.Context, ids: str, *args @tests.command(name='get') -@click.option("--test-id", required=True, metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") +@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test IDs (e.g., 1,2,3).") @click.pass_context @pass_environment -def get_test_labels(environment: Environment, context: click.Context, test_id: str, *args, **kwargs): +def get_test_labels(environment: Environment, context: click.Context, test_ids: str, *args, **kwargs): """Get the labels of tests using test IDs""" environment.check_for_required_parameters() print_config(environment, "Get Test Labels") try: - test_id_list = [int(id.strip()) for id in test_id.split(",")] + test_id_list = [int(id.strip()) for id in test_ids.split(",")] except ValueError: environment.elog("Error: Invalid test IDs format. 
Use comma-separated integers (e.g., 1,2,3).") exit(1) From 718ab00cd6ea27219345ddbc3b40e6e54007eb96 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 1 Sep 2025 20:34:57 +0800 Subject: [PATCH 24/36] TRCLI-114 added option --run-id to also filter tests by runs in label tests lists, also updated unit tests --- tests/test_api_request_handler_labels.py | 51 ++++++++++++++++++++++++ tests/test_cmd_labels.py | 22 ++++++++-- tests_e2e/test_end2end.py | 10 +++-- trcli/api/api_request_handler.py | 30 +++++++++----- trcli/commands/cmd_labels.py | 16 ++++++-- 5 files changed, 110 insertions(+), 19 deletions(-) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index abe2d17..a3bb667 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -896,6 +896,57 @@ def test_get_tests_by_label_success(self, labels_handler): assert result[0]['id'] == 1 assert result[1]['id'] == 3 + def test_get_tests_by_label_with_run_ids(self, labels_handler): + """Test retrieval of tests by label filtered by specific run IDs""" + # Mock run responses for specific run IDs + mock_run_response_1 = APIClientResult( + status_code=200, + response_text={"id": 1, "name": "Test Run 1"}, + error_message=None + ) + + mock_run_response_2 = APIClientResult( + status_code=200, + response_text={"id": 2, "name": "Test Run 2"}, + error_message=None + ) + + # Mock tests responses for each run + mock_tests_response_run1 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]} + ]}, + error_message=None + ) + + mock_tests_response_run2 = APIClientResult( + status_code=200, + response_text={"tests": [ + {"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]} + ]}, + error_message=None + ) + + with patch.object(labels_handler.client, 'send_get') as mock_get: + mock_get.side_effect = [ + mock_run_response_1, # get_run/1 + mock_run_response_2, # get_run/2 + mock_tests_response_run1, # get_tests/1 + mock_tests_response_run2 # get_tests/2 + ] + + result, error = labels_handler.get_tests_by_label( + project_id=1, + label_ids=[5], + run_ids=[1, 2] + ) + + assert error == "" + assert len(result) == 2 + assert result[0]['id'] == 1 + assert result[1]['id'] == 2 + def test_get_test_labels_success(self, labels_handler): """Test successful retrieval of test labels""" # Mock test responses diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py index 08d61f7..679440f 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -772,13 +772,13 @@ def test_list_tests_by_label_success(self, mock_project_client): result = self.runner.invoke( cmd_labels.tests, - ['list', '--ids', '5'], + ['list', '--run-id', '1', '--ids', '5'], obj=self.environment ) assert result.exit_code == 0 mock_client_instance.api_request_handler.get_tests_by_label.assert_called_once_with( - project_id=1, label_ids=[5] + project_id=1, label_ids=[5], run_ids=[1] ) mock_log.assert_any_call("Found 2 matching test(s):") @@ -828,13 +828,29 @@ def test_list_tests_invalid_ids(self, mock_project_client): result = self.runner.invoke( cmd_labels.tests, - ['list', '--ids', 'invalid,ids'], + ['list', '--run-id', '1', '--ids', 'invalid,ids'], obj=self.environment ) assert result.exit_code == 1 mock_elog.assert_any_call("Error: Invalid label IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_list_tests_invalid_run_ids(self, mock_project_client): + """Test invalid run IDs format in list command""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['list', '--run-id', 'invalid,run', '--ids', '5'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Invalid run IDs format. Use comma-separated integers (e.g., 1,2,3).") + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') def test_add_label_to_tests_csv_file_not_found(self, mock_project_client): """Test error when CSV file is not found""" diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 3976af2..1ff10ac 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1324,18 +1324,20 @@ def test_labels_tests_full_workflow(self): ] ) - # Test 3: List tests by label ID + # Test 3: List tests by label ID from a specific run + # Use a realistic run ID - for E2E testing we'll use run ID 1 as a common test run list_tests_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests list \\ + --run-id "1" \\ --ids "{label_id}" """) _assert_contains( list_tests_output, [ - f"Retrieving tests with label IDs: {label_id}...", + f"Retrieving tests from run IDs: 1 with label IDs: {label_id}...", "matching test(s):" ] ) @@ -1411,6 +1413,7 @@ def test_labels_tests_validation_errors(self): -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests list \\ + --run-id "1" \\ --ids "invalid,ids" """) assert return_code != 0 @@ -1455,7 +1458,8 @@ def test_labels_tests_help_commands(self): tests_list_help_output, [ "Usage: trcli labels tests list [OPTIONS]", - "List tests filtered by label ID", + "List tests filtered by label ID from specific runs", + "--run-id", "--ids" ] ) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index f906212..aa7a172 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1243,13 +1243,14 @@ def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]] return results, "" - def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: + def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: """ - Get tests filtered by label ID or title + Get tests filtered by label ID or title from specific runs :param project_id: Project ID :param label_ids: List of label IDs to filter by :param label_title: Label title to filter by + :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) :returns: Tuple with list of matching tests and error string """ # If filtering by title, first get the label ID @@ -1266,13 +1267,24 @@ def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label if not target_label_ids: return [], "" # No label found is a valid case with 0 results - # Get all runs for the project to find tests - runs_response = self.client.send_get(f"get_runs/{project_id}") - if runs_response.status_code != 200: - return [], runs_response.error_message - - 
runs_data = runs_response.response_text - runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data + # Get runs for the project (either all runs or specific run IDs) + if run_ids: + # Use specific run IDs - validate they exist by getting run details + runs = [] + for run_id in run_ids: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + runs.append(run_response.response_text) + else: + return [], f"Run ID {run_id} not found or inaccessible" + else: + # Get all runs for the project + runs_response = self.client.send_get(f"get_runs/{project_id}") + if runs_response.status_code != 200: + return [], runs_response.error_message + + runs_data = runs_response.response_text + runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data # Collect all tests from all runs matching_tests = [] diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index 4a71e18..7e53515 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -571,14 +571,21 @@ def add_to_tests(environment: Environment, context: click.Context, test_ids: str @tests.command(name='list') +@click.option("--run-id", required=True, metavar="", help="Comma-separated list of run IDs to filter tests from (e.g., 1,2,3).") @click.option("--ids", required=True, metavar="", help="Comma-separated list of label IDs to filter by (e.g., 1,2,3).") @click.pass_context @pass_environment -def list_tests(environment: Environment, context: click.Context, ids: str, *args, **kwargs): - """List tests filtered by label ID""" +def list_tests(environment: Environment, context: click.Context, run_id: str, ids: str, *args, **kwargs): + """List tests filtered by label ID from specific runs""" environment.check_for_required_parameters() print_config(environment, "List Tests by Label") + try: + run_ids = [int(id.strip()) for id in run_id.split(",")] + except ValueError: + environment.elog("Error: Invalid run IDs format. Use comma-separated integers (e.g., 1,2,3).") + exit(1) + try: label_ids = [int(id.strip()) for id in ids.split(",")] except ValueError: @@ -591,11 +598,12 @@ def list_tests(environment: Environment, context: click.Context, ids: str, *args ) project_client.resolve_project() - environment.log(f"Retrieving tests with label IDs: {', '.join(map(str, label_ids))}...") + environment.log(f"Retrieving tests from run IDs: {', '.join(map(str, run_ids))} with label IDs: {', '.join(map(str, label_ids))}...") matching_tests, error_message = project_client.api_request_handler.get_tests_by_label( project_id=project_client.project.project_id, - label_ids=label_ids + label_ids=label_ids, + run_ids=run_ids ) if error_message: From 841ce54e3138c0573775e3c9719ea0187abf1c17 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 1 Sep 2025 20:35:30 +0800 Subject: [PATCH 25/36] TRCLI-114 updated readme guide to include --run-id for labels tests list --- README.md | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index cd7b2dd..7b8f8c2 100644 --- a/README.md +++ b/README.md @@ -740,23 +740,23 @@ Or simple format: ``` ###### Listing Tests by Labels -Find tests that have specific labels applied by label ID. +Find tests that have specific labels applied by label ID from specific test runs. 
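The `--run-id` flag documented in this hunk maps onto the `run_ids` parameter added to `get_tests_by_label` in the preceding patch: each run ID is validated via `get_run/{run_id}`, that run's tests are fetched via `get_tests/{run_id}`, and tests carrying the requested label IDs are collected. The sketch below is illustrative only, not patch content; it assumes the same `send_get` client interface used throughout trcli and an "any requested label matches" rule, since the filtering line itself falls outside this excerpt. The CLI usage examples follow in the README hunk below.

```python
# Illustrative sketch (not part of the patch) of the run-filtered label lookup.
def tests_by_label_sketch(client, label_ids, run_ids):
    matching = []
    for run_id in run_ids:
        run_response = client.send_get(f"get_run/{run_id}")
        if run_response.status_code != 200:
            return [], f"Run ID {run_id} not found or inaccessible"
        tests_response = client.send_get(f"get_tests/{run_id}")
        if tests_response.status_code != 200:
            return [], tests_response.error_message
        for test in tests_response.response_text.get("tests", []):
            test_label_ids = {label.get("id") for label in test.get("labels", [])}
            if test_label_ids.intersection(label_ids):  # assumed "any match" rule
                matching.append(test)
    return matching, ""
```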
```shell -# List tests by label ID +# List tests by label ID from a specific run $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - labels tests list --ids 123 + labels tests list --run-id 456 --ids 123 -# List tests by multiple label IDs +# List tests by multiple label IDs from multiple runs $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - labels tests list --ids "123,124,125" + labels tests list --run-id "456,457" --ids "123,124,125" ``` **Output example:** ``` -Retrieving tests with label IDs: 123... +Retrieving tests from run IDs: 456 with label IDs: 123... Found 2 matching test(s): Test ID: 1001, Title: 'Login functionality test', Status: 1 [Labels: ID:123,Title:'Regression'; ID:124,Title:'Critical'] @@ -812,8 +812,9 @@ Options: ```shell $ trcli labels tests list --help Options: - --ids Comma-separated list of label IDs to filter by [required] - --help Show this message and exit. + --run-id Comma-separated list of run IDs to filter tests from [required] + --ids Comma-separated list of label IDs to filter by [required] + --help Show this message and exit. ``` **Get Tests Command:** @@ -876,10 +877,10 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor **4. Test Analysis and Reporting** ```shell -# Find all regression tests +# Find all regression tests from run 101 $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Web App" \ - labels tests list --ids 5 + labels tests list --run-id 101 --ids 5 # Get detailed label information for failed tests $ trcli -h https://yourinstance.testrail.io --username --password \ From b9312fe10aa396b481b99cf86c71567c8b9e5335 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 3 Sep 2025 14:30:20 +0800 Subject: [PATCH 26/36] TRCLI-123 updated click to support updated version --- setup.py | 2 +- tests/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 299b726..8ca4a94 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ ], include_package_data=True, install_requires=[ - "click==8.0.3", + "click>=8.1.0,<8.2.2", # Note: click version 8.2.2 is yanked as of Aug 2, 2025! 
"pyyaml>=6.0.0,<7.0.0", "junitparser>=3.1.0,<4.0.0", "pyserde==0.12.*", diff --git a/tests/requirements.txt b/tests/requirements.txt index f79424e..a2843bd 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -5,7 +5,7 @@ allure-pytest pytest-freezegun pytest-mock requests-mock -click==8.0.3 +click==8.2.1 pyyaml junitparser pyserde==0.12.* From 966ca5691e8e56e47061bcffa5dcc90c695cbfb6 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 3 Sep 2025 14:31:02 +0800 Subject: [PATCH 27/36] TRCLI-106 added command to always execute coverage reports during unit tests --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 44c9547..95aac4a 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,7 @@ commands = pip install -r tests/requirements-variable-deps.txt pip list coverage run -m pytest -c tests/pytest.ini -W ignore::pytest.PytestCollectionWarning tests + coverage report -m allowlist_externals = cd From 7a3a2802d3b4ea61fb7927a2174952504c70ef3c Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 8 Sep 2025 16:45:52 +0800 Subject: [PATCH 28/36] TRCLI-89 update support for new references subcommand --- trcli/api/api_request_handler.py | 107 +++++++++++++++ trcli/commands/cmd_references.py | 224 +++++++++++++++++++++++++++++++ trcli/constants.py | 1 + 3 files changed, 332 insertions(+) create mode 100644 trcli/commands/cmd_references.py diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index aa7a172..6f122e7 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1344,3 +1344,110 @@ def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: }) return results, "" + + # Test case reference management methods + def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + """ + Add references to a test case + :param case_id: ID of the test case + :param references: List of references to add + :returns: Tuple with success status and error string + """ + # First get the current test case to retrieve existing references + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" + + case_data = case_response.response_text + existing_refs = case_data.get('refs', '') or '' + + # Parse existing references + existing_ref_list = [] + if existing_refs: + existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] + + # Add new references (avoid duplicates) + all_refs = existing_ref_list.copy() + for ref in references: + if ref not in all_refs: + all_refs.append(ref) + + # Join all references + new_refs_string = ', '.join(all_refs) + + # Validate total character limit + if len(new_refs_string) > 2000: + return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" + + # Update the test case with new references + update_data = {'refs': new_refs_string} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.status_code == 200: + return True, "" + else: + return False, update_response.error_message + + def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + """ + Update references on a test case by replacing existing ones + :param case_id: ID of the test case + :param references: List of references to replace existing ones + :returns: Tuple with success status and error string + 
""" + # Join references + new_refs_string = ', '.join(references) + + # Validate total character limit + if len(new_refs_string) > 2000: + return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" + + # Update the test case with new references + update_data = {'refs': new_refs_string} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.status_code == 200: + return True, "" + else: + return False, update_response.error_message + + def delete_case_references(self, case_id: int, specific_references: List[str] = None) -> Tuple[bool, str]: + """ + Delete all or specific references from a test case + :param case_id: ID of the test case + :param specific_references: List of specific references to delete (None to delete all) + :returns: Tuple with success status and error string + """ + if specific_references is None: + # Delete all references by setting refs to empty string + update_data = {'refs': ''} + else: + # First get the current test case to retrieve existing references + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" + + case_data = case_response.response_text + existing_refs = case_data.get('refs', '') or '' + + if not existing_refs: + # No references to delete + return True, "" + + # Parse existing references + existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] + + # Remove specific references + remaining_refs = [ref for ref in existing_ref_list if ref not in specific_references] + + # Join remaining references + new_refs_string = ', '.join(remaining_refs) + update_data = {'refs': new_refs_string} + + # Update the test case + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.status_code == 200: + return True, "" + else: + return False, update_response.error_message diff --git a/trcli/commands/cmd_references.py b/trcli/commands/cmd_references.py new file mode 100644 index 0000000..e3774f1 --- /dev/null +++ b/trcli/commands/cmd_references.py @@ -0,0 +1,224 @@ +import click + +from trcli.api.project_based_client import ProjectBasedClient +from trcli.cli import pass_environment, CONTEXT_SETTINGS, Environment +from trcli.data_classes.dataclass_testrail import TestRailSuite + + +def print_config(env: Environment, action: str): + env.log(f"References {action} Execution Parameters" + f"\n> TestRail instance: {env.host} (user: {env.username})" + f"\n> Project: {env.project if env.project else env.project_id}") + + +@click.group(context_settings=CONTEXT_SETTINGS) +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, *args, **kwargs): + """Manage references in TestRail""" + environment.cmd = "references" + environment.set_parameters(context) + + +@cli.group() +@click.pass_context +@pass_environment +def cases(environment: Environment, context: click.Context, *args, **kwargs): + """Manage references for test cases""" + pass + + +@cases.command(name='add') +@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--refs", required=True, metavar="", help="Comma-separated list of references to add (e.g., REQ-1,REQ-2).") +@click.pass_context +@pass_environment +def add_references(environment: Environment, context: click.Context, test_ids: str, refs: str, *args, 
**kwargs): + """Add references to test cases""" + environment.check_for_required_parameters() + print_config(environment, "Add References") + + # Parse test case IDs + try: + test_case_ids = [int(id.strip()) for id in test_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + # Parse references - allow up to 2000 characters total + references = [ref.strip() for ref in refs.split(",") if ref.strip()] + if not references: + environment.elog("Error: No valid references provided.") + exit(1) + + # Validate total character limit + total_refs_length = len(", ".join(references)) + if total_refs_length > 2000: + environment.elog(f"Error: Total references length ({total_refs_length} characters) exceeds 2000 character limit.") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Adding references to {len(test_case_ids)} test case(s)...") + environment.log(f"References: {', '.join(references)}") + + # Process each test case + success_count = 0 + failed_cases = [] + + for case_id in test_case_ids: + success, error_message = project_client.api_request_handler.add_case_references( + case_id=case_id, + references=references + ) + + if success: + success_count += 1 + environment.log(f" ✓ Test case {case_id}: References added successfully") + else: + failed_cases.append({"case_id": case_id, "error": error_message}) + environment.elog(f" ✗ Test case {case_id}: {error_message}") + + # Summary + if success_count > 0: + environment.log(f"Successfully added references to {success_count} test case(s)") + + if failed_cases: + environment.elog(f"Failed to add references to {len(failed_cases)} test case(s)") + exit(1) + + +@cases.command(name='update') +@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--refs", required=True, metavar="", help="Comma-separated list of references to replace existing ones (e.g., REQ-1,REQ-2).") +@click.pass_context +@pass_environment +def update_references(environment: Environment, context: click.Context, test_ids: str, refs: str, *args, **kwargs): + """Update references on test cases by replacing existing ones""" + environment.check_for_required_parameters() + print_config(environment, "Update References") + + # Parse test case IDs + try: + test_case_ids = [int(id.strip()) for id in test_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid test case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + # Parse references - allow up to 2000 characters total + references = [ref.strip() for ref in refs.split(",") if ref.strip()] + if not references: + environment.elog("Error: No valid references provided.") + exit(1) + + # Validate total character limit + total_refs_length = len(", ".join(references)) + if total_refs_length > 2000: + environment.elog(f"Error: Total references length ({total_refs_length} characters) exceeds 2000 character limit.") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + environment.log(f"Updating references for {len(test_case_ids)} test case(s)...") + environment.log(f"New references: {', '.join(references)}") + + # Process each test case + success_count = 0 + failed_cases = [] + + for case_id in test_case_ids: + success, error_message = project_client.api_request_handler.update_case_references( + case_id=case_id, + references=references + ) + + if success: + success_count += 1 + environment.log(f" ✓ Test case {case_id}: References updated successfully") + else: + failed_cases.append({"case_id": case_id, "error": error_message}) + environment.elog(f" ✗ Test case {case_id}: {error_message}") + + # Summary + if success_count > 0: + environment.log(f"Successfully updated references for {success_count} test case(s)") + + if failed_cases: + environment.elog(f"Failed to update references for {len(failed_cases)} test case(s)") + exit(1) + + +@cases.command(name='delete') +@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--refs", metavar="", help="Comma-separated list of specific references to delete. If not provided, all references will be deleted.") +@click.confirmation_option(prompt="Are you sure you want to delete these references?") +@click.pass_context +@pass_environment +def delete_references(environment: Environment, context: click.Context, test_ids: str, refs: str = None, *args, **kwargs): + """Delete all or specific references from test cases""" + environment.check_for_required_parameters() + print_config(environment, "Delete References") + + # Parse test case IDs + try: + test_case_ids = [int(id.strip()) for id in test_ids.split(",")] + except ValueError: + environment.elog("Error: Invalid test case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") + exit(1) + + # Parse specific references if provided + specific_refs = None + if refs: + specific_refs = [ref.strip() for ref in refs.split(",") if ref.strip()] + if not specific_refs: + environment.elog("Error: No valid references provided.") + exit(1) + + project_client = ProjectBasedClient( + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + ) + project_client.resolve_project() + + if specific_refs: + environment.log(f"Deleting specific references from {len(test_case_ids)} test case(s)...") + environment.log(f"References to delete: {', '.join(specific_refs)}") + else: + environment.log(f"Deleting all references from {len(test_case_ids)} test case(s)...") + + # Process each test case + success_count = 0 + failed_cases = [] + + for case_id in test_case_ids: + success, error_message = project_client.api_request_handler.delete_case_references( + case_id=case_id, + specific_references=specific_refs + ) + + if success: + success_count += 1 + if specific_refs: + environment.log(f" ✓ Test case {case_id}: Specific references deleted successfully") + else: + environment.log(f" ✓ Test case {case_id}: All references deleted successfully") + else: + failed_cases.append({"case_id": case_id, "error": error_message}) + environment.elog(f" ✗ Test case {case_id}: {error_message}") + + # Summary + if success_count > 0: + environment.log(f"Successfully deleted references from {success_count} test case(s)") + + if failed_cases: + environment.elog(f"Failed to delete references from {len(failed_cases)} test case(s)") + exit(1) + diff --git a/trcli/constants.py b/trcli/constants.py index 34cf1f7..beecfc5 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -72,6 +72,7 @@ parse_openapi=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), parse_robot=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), labels=dict(**FAULT_MAPPING), + references=dict(**FAULT_MAPPING), ) PROMPT_MESSAGES = dict( From 6f168a36900fee2429de5d1c3a5578789aab343f Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Mon, 8 Sep 2025 16:47:30 +0800 Subject: [PATCH 29/36] TRCLI-89 updated readme guide, unit and functional tests for references --- README.md | 148 ++++++++ tests/test_api_request_handler_references.py | 351 +++++++++++++++++++ tests/test_cmd_references.py | 263 ++++++++++++++ tests_e2e/test_end2end.py | 112 ++++++ 4 files changed, 874 insertions(+) create mode 100644 tests/test_api_request_handler_references.py create mode 100644 tests/test_cmd_references.py diff --git a/README.md b/README.md index 7b8f8c2..ee26cd2 100644 --- a/README.md +++ b/README.md @@ -888,6 +888,154 @@ $ trcli -h https://yourinstance.testrail.io --username --passwor labels tests get --test-ids "4001,4002,4003" ``` +#### References Management + +The TestRail CLI provides comprehensive reference management capabilities using the `references` command. References help link test assets to external requirements, user stories, or other documentation, making it easier to track test coverage and maintain traceability. 
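Every reference operation in this patch ultimately reads and writes the case's `refs` field, which the handler treats as a single comma-separated string capped at 2000 characters. A minimal sketch of the add flow implemented by `add_case_references` in the api_request_handler.py hunk earlier in this series, illustrative only and assuming the same `send_get`/`send_post` client interface:

```python
# Illustrative sketch (not part of the patch) of the add-references flow.
def add_case_refs_sketch(client, case_id, new_refs):
    case_response = client.send_get(f"get_case/{case_id}")
    if case_response.status_code != 200:
        return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}"
    existing = case_response.response_text.get("refs", "") or ""
    merged = [ref.strip() for ref in existing.split(",") if ref.strip()]
    for ref in new_refs:
        if ref not in merged:  # duplicates are skipped, matching the patch
            merged.append(ref)
    refs_string = ", ".join(merged)
    if len(refs_string) > 2000:  # limit enforced before calling update_case
        return False, f"Total references length ({len(refs_string)} characters) exceeds 2000 character limit"
    update_response = client.send_post(f"update_case/{case_id}", {"refs": refs_string})
    return update_response.status_code == 200, update_response.error_message or ""
```

The update and delete subcommands reuse the same pattern: update overwrites `refs` with the joined new list, and delete either clears the field or re-joins whatever remains after removing the specified entries.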
+ +The TestRail CLI supports complete reference management for test cases with the following operations: +- **Add**: Add references to existing test cases without removing existing ones +- **Update**: Replace all existing references with new ones +- **Delete**: Remove all or specific references from test cases + +All reference operations support validation and error handling, with a 2000-character limit for the total references field per test case. + +##### Reference Management Features + +**Test Case References Support:** +- **Add** references to test cases while preserving existing ones (2000 characters maximum, single or multiple test cases) +- **Update** references by replacing existing ones entirely +- **Delete** all references or specific references from test cases + +###### Adding References to Test Cases +Add references to test cases without removing existing ones. New references are appended to any existing references. + +```shell +# Add references to a single test case +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + references cases add --test-ids 123 --refs "REQ-001,REQ-002" + +# Add references to multiple test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + references cases add --test-ids "123,124,125" --refs "STORY-456,BUG-789" +``` + +**Output example:** +``` +Adding references to 2 test case(s)... +References: REQ-001, REQ-002 + ✓ Test case 123: References added successfully + ✓ Test case 124: References added successfully +Successfully added references to 2 test case(s) +``` + +###### Updating References on Test Cases +Replace all existing references with new ones. This completely overwrites any existing references. + +```shell +# Update references for a single test case +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + references cases update --test-ids 123 --refs "REQ-003,REQ-004" + +# Update references for multiple test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + references cases update --test-ids "123,124" --refs "EPIC-100,STORY-200" +``` + +**Output example:** +``` +Updating references for 2 test case(s)... +New references: REQ-003, REQ-004 + ✓ Test case 123: References updated successfully + ✓ Test case 124: References updated successfully +Successfully updated references for 2 test case(s) +``` + +###### Deleting References from Test Cases +Remove all references or specific references from test cases. + +```shell +# Delete all references from test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + references cases delete --test-ids "123,124" + +# Delete specific references from test cases +$ trcli -h https://yourinstance.testrail.io --username --password \ + --project "Your Project" \ + references cases delete --test-ids "123,124" --refs "REQ-001,STORY-456" +``` + +**Output example:** +``` +Deleting all references from 2 test case(s)... + ✓ Test case 123: All references deleted successfully + ✓ Test case 124: All references deleted successfully +Successfully deleted references from 2 test case(s) +``` + +##### Reference Management Command Reference + +**Main References Command:** +```shell +$ trcli references --help +Usage: trcli references [OPTIONS] COMMAND [ARGS]... + + Manage references in TestRail + +Options: + --help Show this message and exit. 
+ +Commands: + cases Manage references for test cases +``` + +**Test Cases References Commands:** +```shell +$ trcli references cases --help +Usage: trcli references cases [OPTIONS] COMMAND [ARGS]... + + Manage references for test cases + +Options: + --help Show this message and exit. + +Commands: + add Add references to test cases + delete Delete all or specific references from test cases + update Update references on test cases by replacing existing ones +``` + +**Add References Command:** +```shell +$ trcli references cases add --help +Options: + --test-ids Comma-separated list of test case IDs [required] + --refs Comma-separated list of references to add [required] + --help Show this message and exit. +``` + +**Update References Command:** +```shell +$ trcli references cases update --help +Options: + --test-ids Comma-separated list of test case IDs [required] + --refs Comma-separated list of references to replace existing ones [required] + --help Show this message and exit. +``` + +**Delete References Command:** +```shell +$ trcli references cases delete --help +Options: + --test-ids Comma-separated list of test case IDs [required] + --refs Comma-separated list of specific references to delete (optional) + --help Show this message and exit. +``` + ### Reference ```shell $ trcli add_run --help diff --git a/tests/test_api_request_handler_references.py b/tests/test_api_request_handler_references.py new file mode 100644 index 0000000..8cf74c7 --- /dev/null +++ b/tests/test_api_request_handler_references.py @@ -0,0 +1,351 @@ +import pytest +from unittest.mock import MagicMock, patch + +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.api.api_client import APIClientResult +from trcli.cli import Environment +from trcli.data_classes.dataclass_testrail import TestRailSuite + + +class TestApiRequestHandlerReferences: + """Test class for reference management API methods""" + + @pytest.fixture + def references_handler(self): + """Create an ApiRequestHandler instance for testing""" + environment = Environment() + environment.host = "https://test.testrail.com" + environment.username = "test@example.com" + environment.password = "password" + + mock_client = MagicMock() + suite = TestRailSuite(name="Test Suite") + + handler = ApiRequestHandler( + environment=environment, + api_client=mock_client, + suites_data=suite, + verify=False + ) + return handler + + def test_add_case_references_success(self, references_handler): + """Test successful addition of references to a test case""" + # Mock get_case response + mock_get_case_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Case 1", + "refs": "REQ-1, REQ-2" + }, + error_message=None + ) + + # Mock update_case response + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "refs": "REQ-1, REQ-2, REQ-3, REQ-4"}, + error_message=None + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response), \ + patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.add_case_references( + case_id=1, + references=["REQ-3", "REQ-4"] + ) + + assert success is True + assert error == "" + + # Verify the API calls + references_handler.client.send_get.assert_called_once_with("get_case/1") + references_handler.client.send_post.assert_called_once_with( + "update_case/1", + {'refs': 'REQ-1, REQ-2, REQ-3, REQ-4'} + ) + + def test_add_case_references_no_existing_refs(self, 
references_handler): + """Test adding references to a test case with no existing references""" + # Mock get_case response with no refs + mock_get_case_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Case 1", + "refs": "" + }, + error_message=None + ) + + # Mock update_case response + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "refs": "REQ-1, REQ-2"}, + error_message=None + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response), \ + patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.add_case_references( + case_id=1, + references=["REQ-1", "REQ-2"] + ) + + assert success is True + assert error == "" + + # Verify the update call + references_handler.client.send_post.assert_called_once_with( + "update_case/1", + {'refs': 'REQ-1, REQ-2'} + ) + + def test_add_case_references_avoid_duplicates(self, references_handler): + """Test that duplicate references are not added""" + # Mock get_case response with existing refs + mock_get_case_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Case 1", + "refs": "REQ-1, REQ-2" + }, + error_message=None + ) + + # Mock update_case response + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "refs": "REQ-1, REQ-2, REQ-3"}, + error_message=None + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response), \ + patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.add_case_references( + case_id=1, + references=["REQ-1", "REQ-3"] # REQ-1 already exists + ) + + assert success is True + assert error == "" + + # Verify only REQ-3 was added (no duplicate REQ-1) + references_handler.client.send_post.assert_called_once_with( + "update_case/1", + {'refs': 'REQ-1, REQ-2, REQ-3'} + ) + + def test_add_case_references_case_not_found(self, references_handler): + """Test handling of non-existent test case""" + mock_get_case_response = APIClientResult( + status_code=404, + response_text=None, + error_message="Test case not found" + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response): + + success, error = references_handler.add_case_references( + case_id=999, + references=["REQ-1"] + ) + + assert success is False + assert error == "Failed to retrieve test case 999: Test case not found" + + def test_add_case_references_character_limit_exceeded(self, references_handler): + """Test character limit validation""" + # Mock get_case response with existing refs + mock_get_case_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Case 1", + "refs": "REQ-1" * 500 # Long existing refs + }, + error_message=None + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response): + + # Try to add more refs that would exceed 2000 chars + long_refs = ["REQ-" + "X" * 500 for _ in range(5)] + + success, error = references_handler.add_case_references( + case_id=1, + references=long_refs + ) + + assert success is False + assert "exceeds 2000 character limit" in error + + def test_update_case_references_success(self, references_handler): + """Test successful update of references on a test case""" + # Mock update_case response + mock_update_response = APIClientResult( + 
status_code=200, + response_text={"id": 1, "refs": "REQ-3, REQ-4"}, + error_message=None + ) + + with patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.update_case_references( + case_id=1, + references=["REQ-3", "REQ-4"] + ) + + assert success is True + assert error == "" + + # Verify the API call + references_handler.client.send_post.assert_called_once_with( + "update_case/1", + {'refs': 'REQ-3, REQ-4'} + ) + + def test_update_case_references_character_limit_exceeded(self, references_handler): + """Test character limit validation for update""" + # Try to update with refs that exceed 2000 chars + long_refs = ["REQ-" + "X" * 500 for _ in range(5)] + + success, error = references_handler.update_case_references( + case_id=1, + references=long_refs + ) + + assert success is False + assert "exceeds 2000 character limit" in error + + def test_update_case_references_api_failure(self, references_handler): + """Test API failure during update""" + # Mock update_case response with failure + mock_update_response = APIClientResult( + status_code=400, + response_text=None, + error_message="Invalid test case ID" + ) + + with patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.update_case_references( + case_id=1, + references=["REQ-1"] + ) + + assert success is False + assert error == "Invalid test case ID" + + def test_delete_case_references_all_success(self, references_handler): + """Test successful deletion of all references""" + # Mock update_case response + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "refs": ""}, + error_message=None + ) + + with patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.delete_case_references( + case_id=1, + specific_references=None # Delete all + ) + + assert success is True + assert error == "" + + # Verify the API call + references_handler.client.send_post.assert_called_once_with( + "update_case/1", + {'refs': ''} + ) + + def test_delete_case_references_specific_success(self, references_handler): + """Test successful deletion of specific references""" + # Mock get_case response + mock_get_case_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Case 1", + "refs": "REQ-1, REQ-2, REQ-3, REQ-4" + }, + error_message=None + ) + + # Mock update_case response + mock_update_response = APIClientResult( + status_code=200, + response_text={"id": 1, "refs": "REQ-1, REQ-4"}, + error_message=None + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response), \ + patch.object(references_handler.client, 'send_post', return_value=mock_update_response): + + success, error = references_handler.delete_case_references( + case_id=1, + specific_references=["REQ-2", "REQ-3"] + ) + + assert success is True + assert error == "" + + # Verify the API calls + references_handler.client.send_get.assert_called_once_with("get_case/1") + references_handler.client.send_post.assert_called_once_with( + "update_case/1", + {'refs': 'REQ-1, REQ-4'} + ) + + def test_delete_case_references_no_existing_refs(self, references_handler): + """Test deletion when no references exist""" + # Mock get_case response with no refs + mock_get_case_response = APIClientResult( + status_code=200, + response_text={ + "id": 1, + "title": "Test Case 1", + "refs": "" + }, + 
error_message=None + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response): + + success, error = references_handler.delete_case_references( + case_id=1, + specific_references=["REQ-1"] + ) + + assert success is True + assert error == "" + + # Verify no update call was made since there were no refs to delete + references_handler.client.send_post.assert_not_called() + + def test_delete_case_references_case_not_found(self, references_handler): + """Test handling of non-existent test case during deletion""" + mock_get_case_response = APIClientResult( + status_code=404, + response_text=None, + error_message="Test case not found" + ) + + with patch.object(references_handler.client, 'send_get', return_value=mock_get_case_response): + + success, error = references_handler.delete_case_references( + case_id=999, + specific_references=["REQ-1"] + ) + + assert success is False + assert error == "Failed to retrieve test case 999: Test case not found" + diff --git a/tests/test_cmd_references.py b/tests/test_cmd_references.py new file mode 100644 index 0000000..fd42905 --- /dev/null +++ b/tests/test_cmd_references.py @@ -0,0 +1,263 @@ +import pytest +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner + +from trcli.cli import Environment +from trcli.commands import cmd_references +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.api.project_based_client import ProjectBasedClient + + +class TestCmdReferences: + """Test class for references command functionality""" + + def setup_method(self): + """Set up test environment""" + self.runner = CliRunner() + self.environment = Environment(cmd="references") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_add_references_success(self, mock_project_client): + """Test successful addition of references to test cases""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_case_references.return_value = (True, "") + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['add', '--test-ids', '1,2', '--refs', 'REQ-1,REQ-2'], + obj=self.environment + ) + + assert result.exit_code == 0 + # Verify API calls were made for each test case + assert mock_client_instance.api_request_handler.add_case_references.call_count == 2 + mock_log.assert_any_call("Adding references to 2 test case(s)...") + mock_log.assert_any_call("References: REQ-1, REQ-2") + mock_log.assert_any_call("Successfully added references to 2 test case(s)") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_add_references_invalid_test_ids(self, mock_project_client): + """Test invalid test case IDs format in add command""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = 
self.runner.invoke( + cmd_references.cases, + ['add', '--test-ids', 'invalid,ids', '--refs', 'REQ-1'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3).") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_add_references_empty_refs(self, mock_project_client): + """Test empty references in add command""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['add', '--test-ids', '1,2', '--refs', ',,,'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: No valid references provided.") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_add_references_too_long(self, mock_project_client): + """Test references exceeding 2000 character limit""" + long_refs = ','.join([f'REQ-{i}' * 100 for i in range(10)]) # Create very long references + + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['add', '--test-ids', '1', '--refs', long_refs], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call(mock.ANY) # Check that an error was logged + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_add_references_api_failure(self, mock_project_client): + """Test API failure during reference addition""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.add_case_references.return_value = (False, "API Error") + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['add', '--test-ids', '1', '--refs', 'REQ-1'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call(" ✗ Test case 1: API Error") + mock_elog.assert_any_call("Failed to add references to 1 test case(s)") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_update_references_success(self, mock_project_client): + """Test successful update of references on test cases""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.update_case_references.return_value = (True, "") + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['update', '--test-ids', '1,2', '--refs', 'REQ-3,REQ-4'], + obj=self.environment + ) + + assert result.exit_code == 0 + # Verify API calls were made for each test case + assert 
mock_client_instance.api_request_handler.update_case_references.call_count == 2 + mock_log.assert_any_call("Updating references for 2 test case(s)...") + mock_log.assert_any_call("New references: REQ-3, REQ-4") + mock_log.assert_any_call("Successfully updated references for 2 test case(s)") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_delete_references_all_success(self, mock_project_client): + """Test successful deletion of all references from test cases""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.delete_case_references.return_value = (True, "") + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['delete', '--test-ids', '1,2', '--yes'], + obj=self.environment + ) + + assert result.exit_code == 0 + # Verify API calls were made for each test case + assert mock_client_instance.api_request_handler.delete_case_references.call_count == 2 + # Check that None was passed for specific_references (delete all) + mock_client_instance.api_request_handler.delete_case_references.assert_called_with( + case_id=mock.ANY, specific_references=None + ) + mock_log.assert_any_call("Deleting all references from 2 test case(s)...") + mock_log.assert_any_call("Successfully deleted references from 2 test case(s)") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_delete_references_specific_success(self, mock_project_client): + """Test successful deletion of specific references from test cases""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + mock_client_instance.api_request_handler.delete_case_references.return_value = (True, "") + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['delete', '--test-ids', '1', '--refs', 'REQ-1,REQ-2', '--yes'], + obj=self.environment + ) + + assert result.exit_code == 0 + # Verify API call was made with specific references + mock_client_instance.api_request_handler.delete_case_references.assert_called_with( + case_id=1, specific_references=['REQ-1', 'REQ-2'] + ) + mock_log.assert_any_call("Deleting specific references from 1 test case(s)...") + mock_log.assert_any_call("References to delete: REQ-1, REQ-2") + mock_log.assert_any_call("Successfully deleted references from 1 test case(s)") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_delete_references_empty_specific_refs(self, mock_project_client): + """Test deletion with empty specific references""" + with patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['delete', '--test-ids', '1', '--refs', ',,,', '--yes'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_elog.assert_any_call("Error: No 
valid references provided.") + + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_mixed_success_and_failure(self, mock_project_client): + """Test scenario with mixed success and failure results""" + # Mock the project client and its methods + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + + # Mock different responses for different test cases + def mock_add_refs(case_id, references): + if case_id == 1: + return True, "" + else: + return False, "Test case not found" + + mock_client_instance.api_request_handler.add_case_references.side_effect = mock_add_refs + + # Mock environment methods + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'elog') as mock_elog, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['add', '--test-ids', '1,2', '--refs', 'REQ-1'], + obj=self.environment + ) + + assert result.exit_code == 1 + mock_log.assert_any_call(" ✓ Test case 1: References added successfully") + mock_elog.assert_any_call(" ✗ Test case 2: Test case not found") + mock_log.assert_any_call("Successfully added references to 1 test case(s)") + mock_elog.assert_any_call("Failed to add references to 1 test case(s)") + diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 1ff10ac..1829d8a 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1474,4 +1474,116 @@ def test_labels_tests_help_commands(self): "--test-id" ] ) + + def test_references_cases_help_commands(self): + """Test references cases help commands""" + + # Test main references help + references_help_output = _run_cmd("trcli references --help") + _assert_contains( + references_help_output, + [ + "Usage: trcli references [OPTIONS] COMMAND [ARGS]...", + "Manage references in TestRail", + "cases" + ] + ) + + # Test references cases help + cases_help_output = _run_cmd("trcli references cases --help") + _assert_contains( + cases_help_output, + [ + "Usage: trcli references cases [OPTIONS] COMMAND [ARGS]...", + "Manage references for test cases", + "add", + "update", + "delete" + ] + ) + + # Test references cases add help + add_help_output = _run_cmd("trcli references cases add --help") + _assert_contains( + add_help_output, + [ + "Usage: trcli references cases add [OPTIONS]", + "Add references to test cases", + "--test-ids", + "--refs" + ] + ) + + # Test references cases update help + update_help_output = _run_cmd("trcli references cases update --help") + _assert_contains( + update_help_output, + [ + "Usage: trcli references cases update [OPTIONS]", + "Update references on test cases by replacing existing ones", + "--test-ids", + "--refs" + ] + ) + + # Test references cases delete help + delete_help_output = _run_cmd("trcli references cases delete --help") + _assert_contains( + delete_help_output, + [ + "Usage: trcli references cases delete [OPTIONS]", + "Delete all or specific references from test cases", + "--test-ids", + "--refs" + ] + ) + + def test_references_cases_error_scenarios(self): + """Test references cases error scenarios""" + + # Test invalid test case IDs format + invalid_ids_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + references cases add \\ + --test-ids "invalid,ids" \\ + --refs "REQ-1" + """) + assert 
return_code != 0 + _assert_contains( + invalid_ids_output, + ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + ) + + # Test empty references + empty_refs_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + references cases add \\ + --test-ids "321" \\ + --refs ",,," + """) + assert return_code != 0 + _assert_contains( + empty_refs_output, + ["Error: No valid references provided."] + ) + + # Test references too long (over 2000 characters) + long_refs = ','.join([f'REQ-{i}' * 100 for i in range(10)]) # Create very long references + long_refs_output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + references cases add \\ + --test-ids "321" \\ + --refs "{long_refs}" + """) + assert return_code != 0 + _assert_contains( + long_refs_output, + ["exceeds 2000 character limit"] + ) \ No newline at end of file From bda4b375396983413a215ab12c2af91a392ae360 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 9 Sep 2025 16:28:42 +0800 Subject: [PATCH 30/36] TRCLI-90 Fixed incorrect character limit for references for test runs, also updated README and affected tests --- README.md | 6 +++--- tests/test_cmd_add_run.py | 24 ++++++++++++++++++++---- tests_e2e/test_end2end.py | 4 ++-- trcli/commands/cmd_add_run.py | 6 +++--- 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 84d70f2..3130f70 100644 --- a/README.md +++ b/README.md @@ -911,7 +911,7 @@ Options: --auto-close-run Use this option to automatically close the created run. --run-case-ids Comma separated list of test case IDs to include in the test run (i.e.: 1,2,3,4). - --run-refs A comma-separated list of references/requirements (up to 2000 characters) + --run-refs A comma-separated list of references/requirements (up to 250 characters) --run-refs-action Action to perform on references: 'add' (default), 'update' (replace all), or 'delete' (remove all or specific) -f, --file Write run title and id to file. @@ -928,7 +928,7 @@ This file can be used as the config file (or appended to an existing config file ### Managing References in Test Runs -The `add_run` command supports comprehensive reference management for test runs. References are stored in TestRail's "References" field and can contain up to 2000 characters. +The `add_run` command supports comprehensive reference management for test runs. References are stored in TestRail's "References" field and can contain up to 250 characters. 
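For example, a minimal invocation that creates a run and attaches references in the same call might look like this (host, project, run title, and reference IDs are placeholders):

```shell
# Create a new run and attach references ('add' is the default --run-refs-action)
$ trcli -y -h https://example.testrail.io/ --project "My Project" \
  add_run --title "Nightly regression" \
  --run-refs "JIRA-101,JIRA-102"
```

To modify references on an existing run instead, pass `--run-id` together with `--run-refs-action update` or `--run-refs-action delete`, as described under the rules below.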
#### Adding References to New Runs @@ -973,7 +973,7 @@ trcli -y -h https://example.testrail.io/ --project "My Project" \ #### Reference Management Rules -- **Character Limit**: References field supports up to 2000 characters +- **Character Limit**: References field supports up to 250 characters - **Format**: Comma-separated list of reference IDs - **Duplicate Prevention**: When adding references, duplicates are automatically prevented - **Action Requirements**: `update` and `delete` actions require an existing run (--run-id must be provided) diff --git a/tests/test_cmd_add_run.py b/tests/test_cmd_add_run.py index 3babe7a..88a52a7 100644 --- a/tests/test_cmd_add_run.py +++ b/tests/test_cmd_add_run.py @@ -50,16 +50,16 @@ def test_write_run_to_file_with_refs_and_description(self, mock_open_file): mock_open_file.return_value.__enter__().write.assert_called_once_with(expected_string) def test_cli_validation_refs_too_long(self): - """Test that references validation fails when exceeding 2000 characters""" + """Test that references validation fails when exceeding 250 characters""" from trcli.cli import Environment environment = Environment() - environment.run_refs = "A" * 2001 # 2001 characters, exceeds limit + environment.run_refs = "A" * 251 # 251 characters, exceeds limit - assert len(environment.run_refs) > 2000 + assert len(environment.run_refs) > 250 runner = CliRunner() - long_refs = "A" * 2001 + long_refs = "A" * 251 result = runner.invoke(cmd_add_run.cli, [ '--title', 'Test Run', @@ -69,6 +69,22 @@ def test_cli_validation_refs_too_long(self): # Should exit with error code 1 due to missing required parameters or validation assert result.exit_code == 1 + def test_cli_validation_refs_exactly_250_chars(self): + """Test that references validation passes with exactly 250 characters""" + from trcli.cli import Environment + + runner = CliRunner() + refs_250 = "A" * 250 # Exactly 250 characters, should pass validation + + result = runner.invoke(cmd_add_run.cli, [ + '--title', 'Test Run', + '--run-refs', refs_250 + ], catch_exceptions=False) + + # Should not fail due to refs validation (will fail due to missing required parameters) + # But the important thing is that it doesn't fail with the character limit error + assert "References field cannot exceed 250 characters" not in result.output + def test_validation_logic_refs_action_without_run_id(self): """Test validation logic for refs action without run_id""" from trcli.cli import Environment diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 93018fb..4823da3 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -523,7 +523,7 @@ def test_cli_add_run_refs_with_references(self): def test_cli_add_run_refs_validation_error(self): """Test references validation (too long)""" - long_refs = "A" * 2001 # Exceeds 2000 character limit + long_refs = "A" * 251 # Exceeds 250 character limit output, return_code = _run_cmd_allow_failure(f""" trcli -y \\ @@ -537,7 +537,7 @@ def test_cli_add_run_refs_validation_error(self): assert return_code != 0 _assert_contains( output, - ["Error: References field cannot exceed 2000 characters."] + ["Error: References field cannot exceed 250 characters."] ) def test_cli_add_run_refs_update_action_validation(self): diff --git a/trcli/commands/cmd_add_run.py b/trcli/commands/cmd_add_run.py index 2a90f10..7e84073 100644 --- a/trcli/commands/cmd_add_run.py +++ b/trcli/commands/cmd_add_run.py @@ -104,7 +104,7 @@ def write_run_to_file(environment: Environment, run_id: int): @click.option( 
"--run-refs", metavar="", - help="A comma-separated list of references/requirements (up to 2000 characters)" + help="A comma-separated list of references/requirements (up to 250 characters)" ) @click.option( "--run-refs-action", @@ -122,8 +122,8 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.set_parameters(context) environment.check_for_required_parameters() - if environment.run_refs and len(environment.run_refs) > 2000: - environment.elog("Error: References field cannot exceed 2000 characters.") + if environment.run_refs and len(environment.run_refs) > 250: + environment.elog("Error: References field cannot exceed 250 characters.") exit(1) if environment.run_refs_action and environment.run_refs_action != 'add' and not environment.run_id: From 19b7766f3641b00839d5207bf4f211477000e557 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 10 Sep 2025 08:58:33 +0800 Subject: [PATCH 31/36] TRCLI-89 Fix added spaces into cases references, also updated parameter name to --case-ids, updated readme guide and affected tests --- README.md | 18 ++++++++-------- tests/test_api_request_handler_references.py | 10 ++++----- tests/test_cmd_references.py | 20 +++++++++--------- tests_e2e/test_end2end.py | 12 +++++------ trcli/api/api_request_handler.py | 6 +++--- trcli/commands/cmd_references.py | 22 ++++++++++---------- 6 files changed, 44 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index ee26cd2..66f0b2b 100644 --- a/README.md +++ b/README.md @@ -913,12 +913,12 @@ Add references to test cases without removing existing ones. New references are # Add references to a single test case $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - references cases add --test-ids 123 --refs "REQ-001,REQ-002" + references cases add --case-ids 123 --refs "REQ-001,REQ-002" # Add references to multiple test cases $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - references cases add --test-ids "123,124,125" --refs "STORY-456,BUG-789" + references cases add --case-ids "123,124,125" --refs "STORY-456,BUG-789" ``` **Output example:** @@ -937,12 +937,12 @@ Replace all existing references with new ones. This completely overwrites any ex # Update references for a single test case $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - references cases update --test-ids 123 --refs "REQ-003,REQ-004" + references cases update --case-ids 123 --refs "REQ-003,REQ-004" # Update references for multiple test cases $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - references cases update --test-ids "123,124" --refs "EPIC-100,STORY-200" + references cases update --case-ids "123,124" --refs "EPIC-100,STORY-200" ``` **Output example:** @@ -961,12 +961,12 @@ Remove all references or specific references from test cases. 
# Delete all references from test cases $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - references cases delete --test-ids "123,124" + references cases delete --case-ids "123,124" # Delete specific references from test cases $ trcli -h https://yourinstance.testrail.io --username --password \ --project "Your Project" \ - references cases delete --test-ids "123,124" --refs "REQ-001,STORY-456" + references cases delete --case-ids "123,124" --refs "REQ-001,STORY-456" ``` **Output example:** @@ -1013,7 +1013,7 @@ Commands: ```shell $ trcli references cases add --help Options: - --test-ids Comma-separated list of test case IDs [required] + --case-ids Comma-separated list of test case IDs [required] --refs Comma-separated list of references to add [required] --help Show this message and exit. ``` @@ -1022,7 +1022,7 @@ Options: ```shell $ trcli references cases update --help Options: - --test-ids Comma-separated list of test case IDs [required] + --case-ids Comma-separated list of test case IDs [required] --refs Comma-separated list of references to replace existing ones [required] --help Show this message and exit. ``` @@ -1031,7 +1031,7 @@ Options: ```shell $ trcli references cases delete --help Options: - --test-ids Comma-separated list of test case IDs [required] + --case-ids Comma-separated list of test case IDs [required] --refs Comma-separated list of specific references to delete (optional) --help Show this message and exit. ``` diff --git a/tests/test_api_request_handler_references.py b/tests/test_api_request_handler_references.py index 8cf74c7..07ec075 100644 --- a/tests/test_api_request_handler_references.py +++ b/tests/test_api_request_handler_references.py @@ -64,7 +64,7 @@ def test_add_case_references_success(self, references_handler): references_handler.client.send_get.assert_called_once_with("get_case/1") references_handler.client.send_post.assert_called_once_with( "update_case/1", - {'refs': 'REQ-1, REQ-2, REQ-3, REQ-4'} + {'refs': 'REQ-1,REQ-2,REQ-3,REQ-4'} ) def test_add_case_references_no_existing_refs(self, references_handler): @@ -101,7 +101,7 @@ def test_add_case_references_no_existing_refs(self, references_handler): # Verify the update call references_handler.client.send_post.assert_called_once_with( "update_case/1", - {'refs': 'REQ-1, REQ-2'} + {'refs': 'REQ-1,REQ-2'} ) def test_add_case_references_avoid_duplicates(self, references_handler): @@ -138,7 +138,7 @@ def test_add_case_references_avoid_duplicates(self, references_handler): # Verify only REQ-3 was added (no duplicate REQ-1) references_handler.client.send_post.assert_called_once_with( "update_case/1", - {'refs': 'REQ-1, REQ-2, REQ-3'} + {'refs': 'REQ-1,REQ-2,REQ-3'} ) def test_add_case_references_case_not_found(self, references_handler): @@ -207,7 +207,7 @@ def test_update_case_references_success(self, references_handler): # Verify the API call references_handler.client.send_post.assert_called_once_with( "update_case/1", - {'refs': 'REQ-3, REQ-4'} + {'refs': 'REQ-3,REQ-4'} ) def test_update_case_references_character_limit_exceeded(self, references_handler): @@ -302,7 +302,7 @@ def test_delete_case_references_specific_success(self, references_handler): references_handler.client.send_get.assert_called_once_with("get_case/1") references_handler.client.send_post.assert_called_once_with( "update_case/1", - {'refs': 'REQ-1, REQ-4'} + {'refs': 'REQ-1,REQ-4'} ) def test_delete_case_references_no_existing_refs(self, references_handler): diff --git 
a/tests/test_cmd_references.py b/tests/test_cmd_references.py index fd42905..a04a26e 100644 --- a/tests/test_cmd_references.py +++ b/tests/test_cmd_references.py @@ -38,7 +38,7 @@ def test_add_references_success(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['add', '--test-ids', '1,2', '--refs', 'REQ-1,REQ-2'], + ['add', '--case-ids', '1,2', '--refs', 'REQ-1,REQ-2'], obj=self.environment ) @@ -58,7 +58,7 @@ def test_add_references_invalid_test_ids(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['add', '--test-ids', 'invalid,ids', '--refs', 'REQ-1'], + ['add', '--case-ids', 'invalid,ids', '--refs', 'REQ-1'], obj=self.environment ) @@ -74,7 +74,7 @@ def test_add_references_empty_refs(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['add', '--test-ids', '1,2', '--refs', ',,,'], + ['add', '--case-ids', '1,2', '--refs', ',,,'], obj=self.environment ) @@ -92,7 +92,7 @@ def test_add_references_too_long(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['add', '--test-ids', '1', '--refs', long_refs], + ['add', '--case-ids', '1', '--refs', long_refs], obj=self.environment ) @@ -116,7 +116,7 @@ def test_add_references_api_failure(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['add', '--test-ids', '1', '--refs', 'REQ-1'], + ['add', '--case-ids', '1', '--refs', 'REQ-1'], obj=self.environment ) @@ -140,7 +140,7 @@ def test_update_references_success(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['update', '--test-ids', '1,2', '--refs', 'REQ-3,REQ-4'], + ['update', '--case-ids', '1,2', '--refs', 'REQ-3,REQ-4'], obj=self.environment ) @@ -167,7 +167,7 @@ def test_delete_references_all_success(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['delete', '--test-ids', '1,2', '--yes'], + ['delete', '--case-ids', '1,2', '--yes'], obj=self.environment ) @@ -197,7 +197,7 @@ def test_delete_references_specific_success(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['delete', '--test-ids', '1', '--refs', 'REQ-1,REQ-2', '--yes'], + ['delete', '--case-ids', '1', '--refs', 'REQ-1,REQ-2', '--yes'], obj=self.environment ) @@ -219,7 +219,7 @@ def test_delete_references_empty_specific_refs(self, mock_project_client): result = self.runner.invoke( cmd_references.cases, - ['delete', '--test-ids', '1', '--refs', ',,,', '--yes'], + ['delete', '--case-ids', '1', '--refs', ',,,', '--yes'], obj=self.environment ) @@ -251,7 +251,7 @@ def mock_add_refs(case_id, references): result = self.runner.invoke( cmd_references.cases, - ['add', '--test-ids', '1,2', '--refs', 'REQ-1'], + ['add', '--case-ids', '1,2', '--refs', 'REQ-1'], obj=self.environment ) diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 1829d8a..a7bfc6c 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1509,7 +1509,7 @@ def test_references_cases_help_commands(self): [ "Usage: trcli references cases add [OPTIONS]", "Add references to test cases", - "--test-ids", + "--case-ids", "--refs" ] ) @@ -1521,7 +1521,7 @@ def test_references_cases_help_commands(self): [ "Usage: trcli references cases update [OPTIONS]", "Update references on test cases by replacing existing ones", - "--test-ids", + "--case-ids", "--refs" ] ) @@ -1533,7 +1533,7 @@ def test_references_cases_help_commands(self): [ "Usage: trcli references cases delete [OPTIONS]", "Delete all or specific 
references from test cases", - "--test-ids", + "--case-ids", "--refs" ] ) @@ -1547,7 +1547,7 @@ def test_references_cases_error_scenarios(self): -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ - --test-ids "invalid,ids" \\ + --case-ids "invalid,ids" \\ --refs "REQ-1" """) assert return_code != 0 @@ -1562,7 +1562,7 @@ def test_references_cases_error_scenarios(self): -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ - --test-ids "321" \\ + --case-ids "321" \\ --refs ",,," """) assert return_code != 0 @@ -1578,7 +1578,7 @@ def test_references_cases_error_scenarios(self): -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ - --test-ids "321" \\ + --case-ids "321" \\ --refs "{long_refs}" """) assert return_code != 0 diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 6f122e7..bd25f3a 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1373,7 +1373,7 @@ def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool all_refs.append(ref) # Join all references - new_refs_string = ', '.join(all_refs) + new_refs_string = ','.join(all_refs) # Validate total character limit if len(new_refs_string) > 2000: @@ -1396,7 +1396,7 @@ def update_case_references(self, case_id: int, references: List[str]) -> Tuple[b :returns: Tuple with success status and error string """ # Join references - new_refs_string = ', '.join(references) + new_refs_string = ','.join(references) # Validate total character limit if len(new_refs_string) > 2000: @@ -1441,7 +1441,7 @@ def delete_case_references(self, case_id: int, specific_references: List[str] = remaining_refs = [ref for ref in existing_ref_list if ref not in specific_references] # Join remaining references - new_refs_string = ', '.join(remaining_refs) + new_refs_string = ','.join(remaining_refs) update_data = {'refs': new_refs_string} # Update the test case diff --git a/trcli/commands/cmd_references.py b/trcli/commands/cmd_references.py index e3774f1..2329c25 100644 --- a/trcli/commands/cmd_references.py +++ b/trcli/commands/cmd_references.py @@ -29,18 +29,18 @@ def cases(environment: Environment, context: click.Context, *args, **kwargs): @cases.command(name='add') -@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--case-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") @click.option("--refs", required=True, metavar="", help="Comma-separated list of references to add (e.g., REQ-1,REQ-2).") @click.pass_context @pass_environment -def add_references(environment: Environment, context: click.Context, test_ids: str, refs: str, *args, **kwargs): +def add_references(environment: Environment, context: click.Context, case_ids: str, refs: str, *args, **kwargs): """Add references to test cases""" environment.check_for_required_parameters() print_config(environment, "Add References") # Parse test case IDs try: - test_case_ids = [int(id.strip()) for id in test_ids.split(",")] + test_case_ids = [int(id.strip()) for id in case_ids.split(",")] except ValueError: environment.elog("Error: Invalid test case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") exit(1) @@ -52,7 +52,7 @@ def add_references(environment: Environment, context: click.Context, test_ids: s exit(1) # Validate total character limit - total_refs_length = len(", ".join(references)) + total_refs_length = len(",".join(references)) if total_refs_length > 2000: environment.elog(f"Error: Total references length ({total_refs_length} characters) exceeds 2000 character limit.") exit(1) @@ -93,18 +93,18 @@ def add_references(environment: Environment, context: click.Context, test_ids: s @cases.command(name='update') -@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--case-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") @click.option("--refs", required=True, metavar="", help="Comma-separated list of references to replace existing ones (e.g., REQ-1,REQ-2).") @click.pass_context @pass_environment -def update_references(environment: Environment, context: click.Context, test_ids: str, refs: str, *args, **kwargs): +def update_references(environment: Environment, context: click.Context, case_ids: str, refs: str, *args, **kwargs): """Update references on test cases by replacing existing ones""" environment.check_for_required_parameters() print_config(environment, "Update References") # Parse test case IDs try: - test_case_ids = [int(id.strip()) for id in test_ids.split(",")] + test_case_ids = [int(id.strip()) for id in case_ids.split(",")] except ValueError: environment.elog("Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3).") exit(1) @@ -116,7 +116,7 @@ def update_references(environment: Environment, context: click.Context, test_ids exit(1) # Validate total character limit - total_refs_length = len(", ".join(references)) + total_refs_length = len(",".join(references)) if total_refs_length > 2000: environment.elog(f"Error: Total references length ({total_refs_length} characters) exceeds 2000 character limit.") exit(1) @@ -157,19 +157,19 @@ def update_references(environment: Environment, context: click.Context, test_ids @cases.command(name='delete') -@click.option("--test-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") +@click.option("--case-ids", required=True, metavar="", help="Comma-separated list of test case IDs (e.g., 1,2,3).") @click.option("--refs", metavar="", help="Comma-separated list of specific references to delete. If not provided, all references will be deleted.") @click.confirmation_option(prompt="Are you sure you want to delete these references?") @click.pass_context @pass_environment -def delete_references(environment: Environment, context: click.Context, test_ids: str, refs: str = None, *args, **kwargs): +def delete_references(environment: Environment, context: click.Context, case_ids: str, refs: str = None, *args, **kwargs): """Delete all or specific references from test cases""" environment.check_for_required_parameters() print_config(environment, "Delete References") # Parse test case IDs try: - test_case_ids = [int(id.strip()) for id in test_ids.split(",")] + test_case_ids = [int(id.strip()) for id in case_ids.split(",")] except ValueError: environment.elog("Error: Invalid test case IDs format. 
Use comma-separated integers (e.g., 1,2,3).") exit(1) From fbc3bc130b3da7e43b0f8b682c434227f6b49d60 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 11 Sep 2025 17:48:13 +0800 Subject: [PATCH 32/36] TRCLI-78 Fixed internal implementation details like removal of payload=None, refactor request_kwargs duplication --- trcli/api/api_client.py | 50 ++++++++++---------------------- trcli/api/api_request_handler.py | 9 +++--- 2 files changed, 21 insertions(+), 38 deletions(-) diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index 3add022..6a12538 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -110,41 +110,23 @@ def __send_request(self, method: str, uri: str, payload: dict, files: Dict[str, method=method, url=url, payload=payload ) if method == "POST": - if as_form_data: - # Send as application/x-www-form-urlencoded (like curl --form) - request_kwargs = { - 'url': url, - 'auth': auth, - 'data': payload, - 'timeout': self.timeout, - 'headers': headers, - 'verify': self.verify, - 'proxies': proxies - } - if files: - request_kwargs['files'] = files - response = requests.post(**request_kwargs) + request_kwargs = { + 'url': url, + 'auth': auth, + 'headers': headers, + 'timeout': self.timeout, + 'verify': self.verify, + 'proxies': proxies + } + if files: + request_kwargs["files"] = files + request_kwargs["data"] = payload if payload else {} + elif as_form_data: + request_kwargs["data"] = payload else: - # Handle different request types - request_kwargs = { - 'url': url, - 'auth': auth, - 'timeout': self.timeout, - 'headers': headers, - 'verify': self.verify, - 'proxies': proxies - } - - if files: - # When files are provided, send as multipart/form-data - request_kwargs['files'] = files - if payload: - request_kwargs['data'] = payload - else: - # When no files, send as JSON - request_kwargs['json'] = payload - - response = requests.post(**request_kwargs) + request_kwargs["json"] = payload + + response = requests.post(**request_kwargs) else: response = requests.get( url=url, diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index ea11a13..2a760ca 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -799,7 +799,7 @@ def delete_label(self, label_id: int) -> Tuple[bool, str]: :param label_id: ID of the label to delete :returns: Tuple with success status and error string """ - response = self.client.send_post(f"delete_label/{label_id}", payload=None) + response = self.client.send_post(f"delete_label/{label_id}") success = response.status_code == 200 return success, response.error_message @@ -809,9 +809,10 @@ def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: :param label_ids: List of label IDs to delete :returns: Tuple with success status and error string """ - # Send as form data with correct parameter name - label_ids_str = ",".join(map(str, label_ids)) - files = {"label_ids": (None, label_ids_str)} + # Send as form data with JSON array format + import json + label_ids_json = json.dumps(label_ids) + files = {"label_ids": (None, label_ids_json)} response = self.client.send_post("delete_labels", payload=None, files=files) success = response.status_code == 200 return success, response.error_message From 3e7ad2de2996d3e181a9b80c42c7b80fd05c5c6c Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 11 Sep 2025 17:51:06 +0800 Subject: [PATCH 33/36] TRCLI-78 Fixed affected functional and unit tests --- tests/test_api_request_handler_labels.py | 7 ++-- tests_e2e/test_end2end.py | 48 
+++++++++++++++++------- 2 files changed, 37 insertions(+), 18 deletions(-) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index ba76dc3..60b2206 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -273,7 +273,7 @@ def test_delete_labels_success(self, labels_handler): labels_handler.client.send_post.assert_called_once_with( "delete_labels", payload=None, - files={"label_ids": (None, "1,2,3")} + files={"label_ids": (None, "[1, 2, 3]")} ) def test_delete_label_single_id(self, labels_handler): @@ -291,8 +291,7 @@ def test_delete_label_single_id(self, labels_handler): assert error is None labels_handler.client.send_post.assert_called_once_with( - "delete_label/1", - payload=None + "delete_label/1" ) def test_delete_labels_batch(self, labels_handler): @@ -312,7 +311,7 @@ def test_delete_labels_batch(self, labels_handler): labels_handler.client.send_post.assert_called_once_with( "delete_labels", payload=None, - files={"label_ids": (None, "1,2,3")} + files={"label_ids": (None, "[1, 2, 3]")} ) def test_delete_labels_api_error(self, labels_handler): diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index ebf3311..f3e5b18 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -530,20 +530,27 @@ def bug_test_automation_id(self): def test_labels_full_workflow(self): """Test complete labels workflow: add, list, get, update, delete""" + # Generate random suffix to avoid conflicts with existing labels + import random + import string + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + label_title = f"e2e-{random_suffix}" + assert len(label_title) <= 20, f"Label title '{label_title}' exceeds 20 characters" + # Step 1: Add a new label add_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ - --title "E2E-Test-Label" + --title "{label_title}" """) _assert_contains( add_output, [ - "Adding label 'E2E-Test-Label'...", + f"Adding label '{label_title}'...", "Successfully added label: ID=", - "Title='E2E-Test-Label'" + f"Title='{label_title}'" ] ) @@ -566,7 +573,7 @@ def test_labels_full_workflow(self): [ "Retrieving labels...", "Found", - f"ID: {label_id}, Title: 'E2E-Test-Label'" + f"ID: {label_id}, Title: '{label_title}'" ] ) @@ -584,25 +591,27 @@ def test_labels_full_workflow(self): f"Retrieving label with ID {label_id}...", "Label details:", f"ID: {label_id}", - "Title: 'E2E-Test-Label'" + f"Title: '{label_title}'" ] ) # Step 4: Update the label + updated_title = f"upd-{random_suffix}" + assert len(updated_title) <= 20, f"Updated title '{updated_title}' exceeds 20 characters" update_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels update \\ --id {label_id} \\ - --title "Updated-E2E-Label" + --title "{updated_title}" """) _assert_contains( update_output, [ f"Updating label with ID {label_id}...", f"Successfully updated label: ID={label_id}", - "Title='Updated-E2E-Label'" + f"Title='{updated_title}'" ] ) @@ -618,7 +627,7 @@ def test_labels_full_workflow(self): get_updated_output, [ f"ID: {label_id}", - "Title: 'Updated-E2E-Label'" + f"Title: '{updated_title}'" ] ) @@ -641,31 +650,42 @@ def test_labels_full_workflow(self): def test_labels_add_multiple_and_delete_multiple(self): """Test adding multiple labels and deleting them in batch""" + # Generate random suffix to avoid conflicts with existing 
labels + import random + import string + random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Add first label + label1_title = f"b1-{random_suffix}" + assert len(label1_title) <= 20, f"Label1 title '{label1_title}' exceeds 20 characters" add_output1 = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ - --title "Batch-Test-1" + --title "{label1_title}" """) # Add second label + label2_title = f"b2-{random_suffix}" + assert len(label2_title) <= 20, f"Label2 title '{label2_title}' exceeds 20 characters" add_output2 = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ - --title "Batch-Test-2" + --title "{label2_title}" """) # Add third label + label3_title = f"b3-{random_suffix}" + assert len(label3_title) <= 20, f"Label3 title '{label3_title}' exceeds 20 characters" add_output3 = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ - --title "Batch-Test-3" + --title "{label3_title}" """) # Extract all label IDs @@ -687,9 +707,9 @@ def test_labels_add_multiple_and_delete_multiple(self): _assert_contains( list_output, [ - f"ID: {label_id1}, Title: 'Batch-Test-1'", - f"ID: {label_id2}, Title: 'Batch-Test-2'", - f"ID: {label_id3}, Title: 'Batch-Test-3'" + f"ID: {label_id1}, Title: '{label1_title}'", + f"ID: {label_id2}, Title: '{label2_title}'", + f"ID: {label_id3}, Title: '{label3_title}'" ] ) From 00a8969e6637628db7ab8c1dc1452dd8621ab4a8 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 11 Sep 2025 22:26:34 +0800 Subject: [PATCH 34/36] Updated python version to 3.10 and up --- .github/workflows/python-app.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 9fbba7c..cf64124 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -20,7 +20,7 @@ jobs: strategy: matrix: - python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"] + python_version: ["3.10", "3.11", "3.12", "3.13"] os: [ubuntu-latest, windows-latest] runs-on: ${{ matrix.os }} From 9ba97ff7cf0ac84a289e29dc123fcc7b3418a85e Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 11 Sep 2025 22:31:16 +0800 Subject: [PATCH 35/36] Updated fault mapping change in handling missing suites --- trcli/api/api_request_handler.py | 2 +- trcli/constants.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index e748cc4..6b7f445 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -160,7 +160,7 @@ def resolve_suite_id_using_name(self, project_id: int) -> Tuple[int, str]: return ( (suite_id, "") if suite_id != -1 - else (-1, FAULT_MAPPING["missing_suite"].format(suite_name=suite_name)) + else (-1, FAULT_MAPPING["missing_suite_by_name"].format(suite_name=suite_name)) ) else: return -1, error diff --git a/trcli/constants.py b/trcli/constants.py index 41b5482..f9541c7 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -33,6 +33,7 @@ file_open_issue="Error occurred while opening the file ({file_path}). 
" "Make sure that the file exists or the path is correct.", missing_suite="Suite with ID '{suite_id}' does not exist in TestRail.", + missing_suite_by_name="Suite with name '{suite_name}' does not exist in TestRail.", no_user_agreement="User did not agree to create '{type}' automatically. Exiting.", error_checking_project="Error detected while checking a project: '{error_message}'", error_while_adding_suite="Error detected while adding suite: '{error_message}'", From e869adf3ce0419bd341c3fcb92192a16a5caeba9 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Thu, 11 Sep 2025 22:32:21 +0800 Subject: [PATCH 36/36] Updated functional tests and changelog for v1.12.0 release --- CHANGELOG.MD | 10 ++++++++-- tests_e2e/test_end2end.py | 24 +++--------------------- 2 files changed, 11 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index ca6fcdf..875e71e 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -8,10 +8,16 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb ## [1.12.0] -_released 08-12-2025 +_released 09-11-2025 ### Added - - Added Label management facility for Projects, Test Runs and Test Cases + - Added Label management facility for Projects, Test Cases and Tests + - Added Reference management feature for cases using 'references cases' command and add_run command using --run-refs and --run-refs-action + +### Fixed + - Fixed check_suite_id to handle paginated results + - Fixed test run detachment when updating a test run without --milestone-id + - Updated supported Click library to latest stable version 8.2.1 ## [1.11.0] diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index da6ec20..d042666 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1528,25 +1528,7 @@ def test_labels_tests_full_workflow(self): ] ) - # Test 3: List tests by label ID from a specific run - # Use a realistic run ID - for E2E testing we'll use run ID 1 as a common test run - list_tests_output = _run_cmd(f""" -trcli -y \\ - -h {self.TR_INSTANCE} \\ - --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ - labels tests list \\ - --run-id "1" \\ - --ids "{label_id}" - """) - _assert_contains( - list_tests_output, - [ - f"Retrieving tests from run IDs: 1 with label IDs: {label_id}...", - "matching test(s):" - ] - ) - - # Test 4: Get test labels for specific tests + # Test 3: Get test labels for specific tests get_test_labels_output = _run_cmd(f""" trcli -y \\ -h {self.TR_INSTANCE} \\ @@ -1593,7 +1575,7 @@ def test_labels_tests_validation_errors(self): assert return_code != 0 _assert_contains( title_error_output, - ["Error: Label title must be 20 characters or less."] + ["exceeds 20 character limit and will be skipped."] ) # Test missing test-ids and file @@ -1649,7 +1631,7 @@ def test_labels_tests_help_commands(self): tests_add_help_output, [ "Usage: trcli labels tests add [OPTIONS]", - "Add a label to tests", + "Add label(s) to tests", "--test-ids", "--test-id-file", "--title"