From 9bed835a8d27445aa0549250a0f48db79d7fed16 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 16 Sep 2025 15:12:37 +0800 Subject: [PATCH 1/6] Update files for 1.12.1 release --- CHANGELOG.MD | 10 ++++++++++ README.md | 8 ++++---- trcli/__init__.py | 2 +- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 875e71e..14de0fd 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -6,6 +6,16 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb - **MINOR**: New features that are backward-compatible. - **PATCH**: Bug fixes or minor changes that do not affect backward compatibility. +## [1.12.1] + +_released 09-18-2025 + +### Added + - Added failed automated tests assignment using --assign option + +### Fixed + - Fixed an issue where JUnit parser fails to detect test case IDs if testcase name is at the beginning or it contains parentheses at the end + ## [1.12.0] _released 09-11-2025 diff --git a/README.md b/README.md index 49670ac..a2cfa73 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ trcli ``` You should get something like this: ``` -TestRail CLI v1.12.0 +TestRail CLI v1.12.1 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) @@ -47,7 +47,7 @@ CLI general reference -------- ```shell $ trcli --help -TestRail CLI v1.12.0 +TestRail CLI v1.12.1 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli [OPTIONS] COMMAND [ARGS]... 
@@ -1039,7 +1039,7 @@ Options: ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.12.0 +TestRail CLI v1.12.1 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -1163,7 +1163,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.12.0 +TestRail CLI v1.12.1 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli parse_openapi [OPTIONS] diff --git a/trcli/__init__.py b/trcli/__init__.py index b518f6e..438a38d 100644 --- a/trcli/__init__.py +++ b/trcli/__init__.py @@ -1 +1 @@ -__version__ = "1.12.0" +__version__ = "1.12.1" From f04ee82b55f88bf55f4e0202668d2e874d34770d Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 16 Sep 2025 16:02:40 +0800 Subject: [PATCH 2/6] Update parser to account for JUnit 5 default test names that ends with parentheses --- trcli/data_classes/data_parsers.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/trcli/data_classes/data_parsers.py b/trcli/data_classes/data_parsers.py index 55cfd81..510ac51 100644 --- a/trcli/data_classes/data_parsers.py +++ b/trcli/data_classes/data_parsers.py @@ -19,6 +19,7 @@ def parse_name_with_id(case_name: str) -> Tuple[int, str]: - "[C123] my test case" - "my test case [C123]" - "module 1 [C123] my test case" + - "my_test_case_C123()" (JUnit 5 support) :param case_name: Name of the test case :return: Tuple with test case ID and test case name without the ID @@ -29,9 +30,10 @@ def parse_name_with_id(case_name: str) -> Tuple[int, str]: for idx, part in enumerate(parts): if part.lower().startswith("c") and len(part) > 1: id_part = part[1:] - if id_part.isnumeric(): + id_part_clean = re.sub(r'\(.*\)$', '', id_part) + if id_part_clean.isnumeric(): parts_copy.pop(idx) - return int(id_part), char.join(parts_copy) + return int(id_part_clean), char.join(parts_copy) results = re.findall(r"\[(.*?)\]", case_name) for result in results: 
From 84609b044a777dcc95af78c7d7aa6d5324515f01 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 16 Sep 2025 16:05:01 +0800 Subject: [PATCH 3/6] TRCLI-159 Added unit test and json and xml test data for junit matchers parser --- .../test_data/XML/junit5_parentheses_test.xml | 18 ++++ .../json/junit5_parentheses_test.json | 101 ++++++++++++++++++ tests/test_matchers_parser.py | 101 ++++++++++++++++++ 3 files changed, 220 insertions(+) create mode 100644 tests/test_data/XML/junit5_parentheses_test.xml create mode 100644 tests/test_data/json/junit5_parentheses_test.json create mode 100644 tests/test_matchers_parser.py diff --git a/tests/test_data/XML/junit5_parentheses_test.xml b/tests/test_data/XML/junit5_parentheses_test.xml new file mode 100644 index 0000000..c01419d --- /dev/null +++ b/tests/test_data/XML/junit5_parentheses_test.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git a/tests/test_data/json/junit5_parentheses_test.json b/tests/test_data/json/junit5_parentheses_test.json new file mode 100644 index 0000000..574ecf2 --- /dev/null +++ b/tests/test_data/json/junit5_parentheses_test.json @@ -0,0 +1,101 @@ +{ + "name": "JUnit 5 Test Suite with Parentheses", + "testsections": [ + { + "name": "JUnit5ParenthesesTests", + "testcases": [ + { + "title": "test_name", + "case_id": 120013, + "result": { + "case_id": 120013, + "elapsed": 1.5, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "" + }, + "custom_automation_id": "com.example.MyTests.test_name_C120013()", + "case_fields": {} + }, + { + "title": "testMethod", + "case_id": 123, + "result": { + "case_id": 123, + "elapsed": 2.1, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "" + }, + "custom_automation_id": "com.example.MyTests.testMethod_C123()", + "case_fields": {} + }, + { + "title": "complexTest", + "case_id": 456, + "result": { + "case_id": 456, + "elapsed": 0.8, + 
"attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "" + }, + "custom_automation_id": "com.example.MyTests.complexTest_C456(String param, int value)", + "case_fields": {} + }, + { + "title": "test_name()", + "case_id": 789, + "result": { + "case_id": 789, + "elapsed": 1.2, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "" + }, + "custom_automation_id": "com.example.MyTests.C789_test_name()", + "case_fields": {} + }, + { + "title": "test_with_brackets()", + "case_id": 999, + "result": { + "case_id": 999, + "elapsed": 0.9, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "" + }, + "custom_automation_id": "com.example.MyTests.[C999] test_with_brackets()", + "case_fields": {} + }, + { + "title": "test_name", + "case_id": 555, + "result": { + "case_id": 555, + "elapsed": 1.0, + "attachments": [], + "result_fields": {}, + "custom_step_results": [], + "status_id": 1, + "comment": "" + }, + "custom_automation_id": "com.example.MyTests.test_name_C555", + "case_fields": {} + } + ] + } + ], + "source": null +} diff --git a/tests/test_matchers_parser.py b/tests/test_matchers_parser.py new file mode 100644 index 0000000..aba67c8 --- /dev/null +++ b/tests/test_matchers_parser.py @@ -0,0 +1,101 @@ +import pytest +from trcli.data_classes.data_parsers import MatchersParser + + +class TestMatchersParser: + """Test cases for MatchersParser.parse_name_with_id method""" + + @pytest.mark.parametrize( + "test_input, expected_id, expected_name", + [ + # Basic patterns (existing functionality) + ("C123 my test case", 123, "my test case"), + ("my test case C123", 123, "my test case"), + ("C123_my_test_case", 123, "my_test_case"), + ("my_test_case_C123", 123, "my_test_case"), + ("module_1_C123_my_test_case", 123, "module_1_my_test_case"), + ("[C123] my test case", 123, "my test case"), + ("my test case [C123]", 123, "my test case"), 
+ ("module 1 [C123] my test case", 123, "module 1 my test case"), + + # JUnit 5 patterns with parentheses (new functionality) + ("test_name_C120013()", 120013, "test_name"), + ("testMethod_C123()", 123, "testMethod"), + ("my_test_C456()", 456, "my_test"), + ("C789_test_name()", 789, "test_name()"), + ("C100 test_name()", 100, "test_name()"), + + # JUnit 5 patterns with parameters + ("test_name_C120013(TestParam)", 120013, "test_name"), + ("test_C456(param1, param2)", 456, "test"), + ("complexTest_C999(String param, int value)", 999, "complexTest"), + + # Edge cases with parentheses + ("myTest_C789()", 789, "myTest"), + ("C200_method()", 200, "method()"), + ("[C300] test_case()", 300, "test_case()"), + ("test [C400] method()", 400, "test method()"), + + # Cases that should not match + ("test_name_C()", None, "test_name_C()"), + ("test_name_123()", None, "test_name_123()"), + ("test_name", None, "test_name"), + ("C_test_name", None, "C_test_name"), + ("test_Cabc_name", None, "test_Cabc_name"), + + # Case sensitivity + ("c123_test_name", 123, "test_name"), + ("test_name_c456", 456, "test_name"), + ("[c789] test_name", 789, "test_name"), + ] + ) + def test_parse_name_with_id_patterns(self, test_input, expected_id, expected_name): + """Test various patterns of test name parsing including JUnit 5 parentheses support""" + case_id, case_name = MatchersParser.parse_name_with_id(test_input) + assert case_id == expected_id, f"Expected ID {expected_id}, got {case_id} for input '{test_input}'" + assert case_name == expected_name, f"Expected name '{expected_name}', got '{case_name}' for input '{test_input}'" + + def test_parse_name_with_id_junit5_specific(self): + """Specific test cases for JUnit 5 parentheses issue reported by user""" + # The exact examples from the user's issue + junit5_cases = [ + ("test_name_C120013()", 120013, "test_name"), # Should work now + ("test_name_C120013", 120013, "test_name"), # Should still work + ("C120013_test_name()", 120013, "test_name()"), # 
Should work + ] + + for test_case, expected_id, expected_name in junit5_cases: + case_id, case_name = MatchersParser.parse_name_with_id(test_case) + assert case_id == expected_id, f"JUnit 5 case failed: {test_case}" + assert case_name == expected_name, f"JUnit 5 name failed: {test_case}" + + def test_parse_name_with_id_regression(self): + """Ensure existing functionality still works (regression test)""" + # Test all the patterns mentioned in the docstring + existing_patterns = [ + ("C123 my test case", 123, "my test case"), + ("my test case C123", 123, "my test case"), + ("C123_my_test_case", 123, "my_test_case"), + ("my_test_case_C123", 123, "my_test_case"), + ("module_1_C123_my_test_case", 123, "module_1_my_test_case"), + ("[C123] my test case", 123, "my test case"), + ("my test case [C123]", 123, "my test case"), + ("module 1 [C123] my test case", 123, "module 1 my test case"), + ] + + for test_case, expected_id, expected_name in existing_patterns: + case_id, case_name = MatchersParser.parse_name_with_id(test_case) + assert case_id == expected_id, f"Regression failed for: {test_case}" + assert case_name == expected_name, f"Regression name failed for: {test_case}" + + def test_parse_name_with_id_empty_and_none(self): + """Test edge cases with empty or None inputs""" + # Empty string + case_id, case_name = MatchersParser.parse_name_with_id("") + assert case_id is None + assert case_name == "" + + # String with just spaces + case_id, case_name = MatchersParser.parse_name_with_id(" ") + assert case_id is None + assert case_name == " " From cd112436cc173f269734a42b4a851072ebb6e205 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 17 Sep 2025 20:18:16 +0800 Subject: [PATCH 4/6] TRCLI-36 Added implementation for auto assign feature for failed test results --- trcli/api/api_request_handler.py | 65 ++++++++++++++++++- trcli/api/results_uploader.py | 76 ++++++++++++++++++++++- trcli/cli.py | 3 +- trcli/commands/cmd_parse_junit.py | 6 ++ 
trcli/commands/results_parser_helpers.py | 2 + trcli/data_providers/api_data_provider.py | 20 +++++- 6 files changed, 167 insertions(+), 5 deletions(-) diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 6b7f445..da6f0bc 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -550,13 +550,19 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: """ Adds one or more new test results. :run_id: run id - :returns: Tuple with dict created resources and error string. + :returns: Tuple with dict created resources, error string, and results count. """ responses = [] error_message = "" + # Get pre-validated user IDs if available + user_ids = getattr(self.environment, '_validated_user_ids', []) + add_results_data_chunks = self.data_provider.add_results_for_cases( - self.environment.batch_size + self.environment.batch_size, user_ids ) + # Get assigned count from data provider + assigned_count = getattr(self.data_provider, '_assigned_count', 0) + results_amount = sum( [len(results["results"]) for results in add_results_data_chunks] ) @@ -600,8 +606,18 @@ def add_results(self, run_id: int) -> Tuple[List, str, int]: self.upload_attachments(report_results_w_attachments, results, run_id) else: self.environment.log(f"No attachments found to upload.") + + # Log assignment results if assignment was performed + if user_ids: + total_failed = getattr(self.data_provider, '_total_failed_count', assigned_count) + if assigned_count > 0: + self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") + else: + self.environment.log(f"Assigning failed results: 0/0, Done.") + return responses, error_message, progress_bar.n + def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] error_message = "" @@ -706,6 +722,51 @@ def retrieve_results_after_cancelling(futures) -> list: responses.append(response) return responses + def get_user_by_email(self, 
email: str) -> Tuple[Union[int, None], str]: + """ + Validates a user email and returns the user ID if valid. + + :param email: User email to validate + :returns: Tuple with user ID (or None if not found) and error message + """ + if not email or not email.strip(): + return None, "Email cannot be empty" + + email = email.strip() + # Use proper URL encoding for the query parameter + import urllib.parse + encoded_email = urllib.parse.quote_plus(email) + response = self.client.send_get(f"get_user_by_email&email={encoded_email}") + + if response.error_message: + # Map TestRail's email validation error to our expected format + if "Field :email is not a valid email address" in response.error_message: + return None, f"User not found: {email}" + return None, response.error_message + + if response.status_code == 200: + try: + user_data = response.response_text + if isinstance(user_data, dict) and 'id' in user_data: + return user_data['id'], "" + else: + return None, f"Invalid response format for user: {email}" + except (KeyError, TypeError): + return None, f"Invalid response format for user: {email}" + elif response.status_code == 400: + # Check if the response contains the email validation error + if (hasattr(response, 'response_text') and response.response_text and + isinstance(response.response_text, dict) and + "Field :email is not a valid email address" in str(response.response_text.get('error', ''))): + return None, f"User not found: {email}" + return None, f"User not found: {email}" + else: + # For other status codes, check if it's the email validation error + if (hasattr(response, 'response_text') and response.response_text and + "Field :email is not a valid email address" in str(response.response_text)): + return None, f"User not found: {email}" + return None, f"API error (status {response.status_code}) when validating user: {email}" + def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: case_body = case.to_dict() active_field = getattr(self, 
"_active_automation_id_field", None) diff --git a/trcli/api/results_uploader.py b/trcli/api/results_uploader.py index 7d28f55..c471c0c 100644 --- a/trcli/api/results_uploader.py +++ b/trcli/api/results_uploader.py @@ -17,7 +17,7 @@ class ResultsUploader(ProjectBasedClient): def __init__(self, environment: Environment, suite: TestRailSuite, skip_run: bool = False): super().__init__(environment, suite) self.skip_run = skip_run - if self.environment.special_parser == "saucectl": + if hasattr(self.environment, 'special_parser') and self.environment.special_parser == "saucectl": self.run_name += f" ({suite.name})" def upload_results(self): @@ -30,6 +30,15 @@ def upload_results(self): start = time.time() results_amount = None + # Validate user emails early if --assign is specified + try: + assign_value = getattr(self.environment, 'assign_failed_to', None) + if assign_value is not None and str(assign_value).strip(): + self._validate_and_store_user_ids() + except (AttributeError, TypeError): + # Skip validation if there are any issues with the assign_failed_to attribute + pass + self.resolve_project() suite_id, suite_added = self.resolve_suite() @@ -117,6 +126,7 @@ def upload_results(self): ) self.environment.log("\n".join(revert_logs)) exit(1) + if self.environment.close_run: self.environment.log("Closing test run. 
", new_line=False) response, error_message = self.api_request_handler.close_run(run_id) @@ -128,6 +138,70 @@ def upload_results(self): stop = time.time() if results_amount: self.environment.log(f"Submitted {results_amount} test results in {stop - start:.1f} secs.") + + # Exit with error if there were invalid users (after processing valid ones) + try: + has_invalid = getattr(self.environment, '_has_invalid_users', False) + if has_invalid is True: # Explicitly check for True to avoid mock object issues + exit(1) + except (AttributeError, TypeError): + # Skip exit if there are any issues with the attribute + pass + + def _validate_and_store_user_ids(self): + """ + Validates user emails from --assign option and stores valid user IDs. + For mixed valid/invalid users, warns about invalid ones but continues with valid ones. + Exits only if NO valid users are found. + """ + try: + assign_value = getattr(self.environment, 'assign_failed_to', None) + if assign_value is None or not str(assign_value).strip(): + return + except (AttributeError, TypeError): + return + + # Check for empty or whitespace-only values + assign_str = str(assign_value) + if not assign_str.strip(): + self.environment.elog("Error: --assign option requires at least one user email") + exit(1) + + emails = [email.strip() for email in assign_str.split(',') if email.strip()] + + if not emails: + self.environment.elog("Error: --assign option requires at least one user email") + exit(1) + + valid_user_ids = [] + invalid_users = [] + + for email in emails: + user_id, error_msg = self.api_request_handler.get_user_by_email(email) + if user_id is None: + invalid_users.append(email) + if "User not found" not in error_msg: + # If it's not a "user not found" error, it might be an API issue + self.environment.elog(f"Error: {error_msg}") + exit(1) + else: + valid_user_ids.append(user_id) + + # Handle invalid users + if invalid_users: + for invalid_user in invalid_users: + self.environment.elog(f"Error: User not found: 
{invalid_user}") + + # Store valid user IDs for processing, but mark that we should exit with error later + self.environment._has_invalid_users = True + + # If ALL users are invalid, exit immediately + if not valid_user_ids: + exit(1) + + # Store valid user IDs for later use + self.environment._validated_user_ids = valid_user_ids + def add_missing_sections(self, project_id: int) -> Tuple[List, int]: """ diff --git a/trcli/cli.py b/trcli/cli.py index 3fa81cc..07233a4 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -69,7 +69,8 @@ def __init__(self, cmd="parse_junit"): self.run_include_all = None self.auto_close_run = None self.run_refs = None - self.proxy = None # Add proxy related attributes + self.proxy = None + self.assign_failed_to = None # Add proxy related attributes self.noproxy = None self.proxy_user = None diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index fb7b8b0..218fb6e 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -21,6 +21,12 @@ type=click.Choice(["junit", "saucectl"], case_sensitive=False), help="Optional special parser option for specialized JUnit reports." ) +@click.option( + "-a", "--assign", + "assign_failed_to", + metavar="", + help="Comma-separated list of user emails to assign failed test results to." 
+) @click.pass_context @pass_environment def cli(environment: Environment, context: click.Context, *args, **kwargs): diff --git a/trcli/commands/results_parser_helpers.py b/trcli/commands/results_parser_helpers.py index f3a3c28..da528e2 100644 --- a/trcli/commands/results_parser_helpers.py +++ b/trcli/commands/results_parser_helpers.py @@ -7,6 +7,7 @@ def print_config(env: Environment): + assign_info = f"Yes ({env.assign_failed_to})" if hasattr(env, 'assign_failed_to') and env.assign_failed_to and env.assign_failed_to.strip() else "No" env.log(f"Parser Results Execution Parameters" f"\n> Report file: {env.file}" f"\n> Config file: {env.config}" @@ -15,6 +16,7 @@ def print_config(env: Environment): f"\n> Run title: {env.title}" f"\n> Update run: {env.run_id if env.run_id else 'No'}" f"\n> Add to milestone: {env.milestone_id if env.milestone_id else 'No'}" + f"\n> Auto-assign failures: {assign_info}" f"\n> Auto-create entities: {env.auto_creation_response}") diff --git a/trcli/data_providers/api_data_provider.py b/trcli/data_providers/api_data_provider.py index 21a962b..e3aacd7 100644 --- a/trcli/data_providers/api_data_provider.py +++ b/trcli/data_providers/api_data_provider.py @@ -119,18 +119,36 @@ def add_run( body["milestone_id"] = milestone_id return body - def add_results_for_cases(self, bulk_size): + def add_results_for_cases(self, bulk_size, user_ids=None): """Return bodies for adding results for cases. 
Returns bodies for results that already have case ID.""" testcases = [sections.testcases for sections in self.suites_input.testsections] bodies = [] + user_index = 0 + assigned_count = 0 + total_failed_count = 0 for sublist in testcases: for case in sublist: if case.case_id is not None: case.result.add_global_result_fields(self.result_fields) + + # Count failed tests + if case.result.status_id == 5: # status_id 5 = Failed + total_failed_count += 1 + + # Assign failed tests to users in round-robin fashion if user_ids provided + if user_ids: + case.result.assignedto_id = user_ids[user_index % len(user_ids)] + user_index += 1 + assigned_count += 1 + bodies.append(case.result.to_dict()) + # Store counts for logging (we'll access this from the api_request_handler) + self._assigned_count = assigned_count if user_ids else 0 + self._total_failed_count = total_failed_count + result_bulks = ApiDataProvider.divide_list_into_bulks( bodies, bulk_size=bulk_size, From 3b6b9f1d73f25c50948645c44082cdf2ef1eec5b Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 17 Sep 2025 20:20:10 +0800 Subject: [PATCH 5/6] TRCLI-36 Added unit and functional tests for auto assign feature for failed tests, also updated README file --- README.md | 46 ++++ tests/test_results_uploader.py | 3 + .../reports_junit/assign_test_failures.xml | 39 ++++ tests_e2e/test_end2end.py | 205 ++++++++++++++++++ 4 files changed, 293 insertions(+) create mode 100644 tests_e2e/reports_junit/assign_test_failures.xml diff --git a/README.md b/README.md index a2cfa73..d0e2936 100644 --- a/README.md +++ b/README.md @@ -136,6 +136,8 @@ Options: --allow-ms Allows using milliseconds for elapsed times. --special-parser Optional special parser option for specialized JUnit reports. + -a, --assign Comma-separated list of user emails to assign failed test + results to. --help Show this message and exit. 
``` @@ -266,6 +268,50 @@ case_result_statuses: ``` You can find statuses ids for your project using following endpoint: ```/api/v2/get_statuses``` + +### Auto-Assigning Failed Tests + +The `--assign` (or `-a`) option allows you to automatically assign failed test results to specific TestRail users. This feature is particularly useful in CI/CD environments where you want to automatically assign failures to responsible team members for investigation. + +#### Usage + +```shell +# Assign failed tests to a single user +$ trcli parse_junit -f results.xml --assign user@example.com \ + --host https://yourinstance.testrail.io --username --password \ + --project "Your Project" + +# Assign failed tests to multiple users (round-robin distribution) +$ trcli parse_junit -f results.xml --assign "user1@example.com,user2@example.com,user3@example.com" \ + --host https://yourinstance.testrail.io --username --password \ + --project "Your Project" + +# Short form using -a +$ trcli parse_junit -f results.xml -a user@example.com \ + --host https://yourinstance.testrail.io --username --password \ + --project "Your Project" +``` + +#### Example Output + +```shell +Parser Results Execution Parameters +> Report file: results.xml +> Config file: /path/to/config.yml +> TestRail instance: https://yourinstance.testrail.io (user: your@email.com) +> Project: Your Project +> Run title: Automated Test Run +> Update run: No +> Add to milestone: No +> Auto-assign failures: Yes (user1@example.com,user2@example.com) +> Auto-create entities: True + +Creating test run. Done. +Adding results: 100%|████████████| 25/25 [00:02<00:00, 12.5results/s] +Assigning failed results: 3/3, Done. +Submitted 25 test results in 2.1 secs. 
+``` + ### Exploring other features #### General features diff --git a/tests/test_results_uploader.py b/tests/test_results_uploader.py index 235bea5..4b71d7f 100644 --- a/tests/test_results_uploader.py +++ b/tests/test_results_uploader.py @@ -36,6 +36,9 @@ def result_uploader_data_provider(self, mocker): environment.run_id = None environment.file = "results.xml" environment.case_matcher = MatchersParser.AUTO + environment.assign_failed_to = None + environment._has_invalid_users = False + environment._validated_user_ids = [] junit_file_parser = mocker.patch.object(JunitParser, "parse_file") api_request_handler = mocker.patch( diff --git a/tests_e2e/reports_junit/assign_test_failures.xml b/tests_e2e/reports_junit/assign_test_failures.xml new file mode 100644 index 0000000..d31f939 --- /dev/null +++ b/tests_e2e/reports_junit/assign_test_failures.xml @@ -0,0 +1,39 @@ + + + + + + + + + Expected validation to pass, but it failed with error: Invalid input + + + + + + + Network call failed after 30 seconds timeout + + + + + + + An unexpected runtime error occurred during test execution + + + + + + + Expected value 'expected' but got 'actual' + + + + + + + + + diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index d042666..2879972 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -1772,4 +1772,209 @@ def test_references_cases_error_scenarios(self): long_refs_output, ["exceeds 2000 character limit"] ) + + # ==================== ASSIGN FEATURE TESTS ==================== + + def test_assign_failures_single_user(self): + """Test --assign feature with single user""" + # Note: This test assumes a valid TestRail user exists in the instance + # In a real environment, you would use actual user emails + output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Single User" \\ + --assign "trcli@gurock.io" \\ + -f 
"reports_junit/assign_test_failures.xml" + """) + _assert_contains( + output, + [ + "Auto-assign failures: Yes (trcli@gurock.io)", + "Processed 6 test cases in section [ASSIGNTESTSUITE]", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted 6 test results in", + "Assigning failed results: 4/4, Done." + ] + ) + + def test_assign_failures_multiple_users(self): + """Test --assign feature with multiple users (round-robin assignment)""" + output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Multiple Users" \\ + --assign "trcli@gurock.io,trcli@testrail.com" \\ + -f "reports_junit/assign_test_failures.xml" + """) + _assert_contains( + output, + [ + "Auto-assign failures: Yes (trcli@gurock.io,trcli@testrail.com)", + "Processed 6 test cases in section [ASSIGNTESTSUITE]", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted 6 test results in", + "Assigning failed results: 4/4, Done." + ] + ) + + def test_assign_failures_short_form(self): + """Test --assign feature using -a short form""" + output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Short Form" \\ + -a "trcli@gurock.io" \\ + -f "reports_junit/assign_test_failures.xml" + """) + _assert_contains( + output, + [ + "Auto-assign failures: Yes (trcli@gurock.io)", + "Processed 6 test cases in section [ASSIGNTESTSUITE]", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted 6 test results in", + "Assigning failed results: 4/4, Done." 
+ ] + ) + + def test_assign_failures_without_assign_option(self): + """Test that normal operation works without --assign option""" + output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] No Assign Option" \\ + -f "reports_junit/assign_test_failures.xml" + """) + _assert_contains( + output, + [ + "Auto-assign failures: No", + "Processed 6 test cases in section [ASSIGNTESTSUITE]", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted 6 test results in" + ] + ) + # Should NOT contain assignment message + assert "Assigning failed results:" not in output + + def test_assign_failures_invalid_user(self): + """Test --assign feature with invalid user email""" + output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Invalid User" \\ + --assign "invalid.user@nonexistent.com" \\ + -f "reports_junit/assign_test_failures.xml" + """) + + assert return_code != 0 + _assert_contains( + output, + [ + "Error: User not found: invalid.user@nonexistent.com" + ] + ) + + def test_assign_failures_mixed_valid_invalid_users(self): + """Test --assign feature with mix of valid and invalid users""" + output, return_code = _run_cmd_allow_failure(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Mixed Users" \\ + --assign "trcli@gurock.io,invalid.user@nonexistent.com" \\ + -f "reports_junit/assign_test_failures.xml" + """) + + assert return_code != 0 + _assert_contains( + output, + [ + "Error: User not found: invalid.user@nonexistent.com" + ] + ) + + def test_assign_failures_whitespace_handling(self): + """Test --assign feature with whitespace in email list""" + output = _run_cmd(f""" +trcli -y \\ + -h 
{self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Whitespace" \\ + --assign " trcli@gurock.io , trcli@testrail.com " \\ + -f "reports_junit/assign_test_failures.xml" + """) + _assert_contains( + output, + [ + "Auto-assign failures: Yes ( trcli@gurock.io , trcli@testrail.com )", + "Processed 6 test cases in section [ASSIGNTESTSUITE]", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted 6 test results in", + "Assigning failed results: 4/4, Done." + ] + ) + + def test_assign_failures_help_documentation(self): + """Test that --assign option appears in help documentation""" + help_output = _run_cmd("trcli parse_junit --help") + _assert_contains( + help_output, + [ + "-a, --assign", + "Comma-separated list of user emails to assign failed", + "test results to." + ] + ) + + def test_assign_failures_with_existing_run(self): + """Test --assign feature when updating an existing run""" + # First create a run + create_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ + -f "reports_junit/generic_ids_auto.xml" + """) + + # Extract run ID from output + import re + run_id_match = re.search(r'runs/view/(\d+)', create_output) + assert run_id_match, "Could not extract run ID from output" + run_id = run_id_match.group(1) + + # Update the run with failed tests and assignment + update_output = _run_cmd(f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_junit \\ + --run-id {run_id} \\ + --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ + --assign "trcli@gurock.io" \\ + -f "reports_junit/assign_test_failures.xml" + """) + _assert_contains( + update_output, + [ + "Auto-assign failures: Yes (trcli@gurock.io)", + f"Updating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view/{run_id}", + "Submitted 6 test results in", + "Assigning failed results: 4/4, Done." + ] + ) \ No newline at end of file From 214e429388080052d49fc7d48c49d9ff39be2e39 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Tue, 30 Sep 2025 13:14:49 +0800 Subject: [PATCH 6/6] Update changelog for 1.12.1 release --- CHANGELOG.MD | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 14de0fd..b8306f5 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -8,13 +8,13 @@ This project adheres to [Semantic Versioning](https://semver.org/). Version numb ## [1.12.1] -_released 09-18-2025 +_released 09-30-2025 ### Added - Added failed automated tests assignment using --assign option ### Fixed - - Fixed an issue where JUnit parser fails to detect test case IDs if testcase name is at the beginning or it contains parentheses at the end + - Fixed an issue where the JUnit parser failed to detect test case IDs when the ID appears at the beginning of the test case name or the name ends with parentheses ## [1.12.0]