diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 215d98a..e4c9881 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -23,30 +23,31 @@ jobs: PR_BODY="${{ github.event.pull_request.body }}" # Check if PR title or body contains issue reference - # Accepts: TRCLI-### (JIRA), GIT-### (GitHub), #123 (GitHub), issues/123 - if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+"; then - echo "issue_found=true" >> $GITHUB_OUTPUT + if echo "$PR_TITLE $PR_BODY" | grep -qE "(TRCLI-[0-9]+|GIT-[0-9]+|#[0-9]+|issues/[0-9]+)"; then + echo "issue_found=true" >> "$GITHUB_OUTPUT" - # Extract the issue key/number if echo "$PR_TITLE $PR_BODY" | grep -qE "TRCLI-[0-9]+"; then ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "TRCLI-[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=JIRA" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=JIRA" >> "$GITHUB_OUTPUT" + elif echo "$PR_TITLE $PR_BODY" | grep -qE "GIT-[0-9]+"; then ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "GIT-[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=GitHub" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=GitHub" >> "$GITHUB_OUTPUT" + elif echo "$PR_TITLE $PR_BODY" | grep -qE "#[0-9]+"; then ISSUE_KEY=$(echo "$PR_TITLE $PR_BODY" | grep -oE "#[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=GitHub" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=GitHub" >> "$GITHUB_OUTPUT" + elif echo "$PR_BODY" | grep -qE "issues/[0-9]+"; then ISSUE_KEY=$(echo "$PR_BODY" | grep -oE "issues/[0-9]+" | head -1) - echo "issue_key=$ISSUE_KEY" >> $GITHUB_OUTPUT - echo "issue_type=GitHub" >> $GITHUB_OUTPUT + echo "issue_key=$ISSUE_KEY" >> "$GITHUB_OUTPUT" + echo "issue_type=GitHub" >> "$GITHUB_OUTPUT" fi else - echo "issue_found=false" >> $GITHUB_OUTPUT + echo "issue_found=false" >> "$GITHUB_OUTPUT" fi - name: Comment on PR if issue reference missing @@ -61,18 +62,15 @@ jobs: repo: context.repo.repo, body: `## ⚠️ Missing Issue Reference - This PR does not reference an issue. Please include a reference in either: + This PR does not reference an issue. Please include one. - **JIRA tickets:** - - PR title: "feat(api): TRCLI-123 Add new endpoint" - - PR description: "Resolves TRCLI-123" + **JIRA example:** + - TRCLI-123 - **GitHub issues:** - - PR title: "feat(api): GIT-123 Add new endpoint" - - PR description: "Resolves GIT-123" or "Fixes #123" - - Or link to the GitHub issue - - This helps with tracking and project management. 
Thank you!` + **GitHub examples:** + - GIT-123 + - Fixes #123 + - issues/123` }) - name: Check PR Description Completeness @@ -80,23 +78,22 @@ jobs: run: | PR_BODY="${{ github.event.pull_request.body }}" - # Check for required sections if echo "$PR_BODY" | grep -q "Issue being resolved"; then - echo "has_issue=true" >> $GITHUB_OUTPUT + echo "has_issue=true" >> "$GITHUB_OUTPUT" else - echo "has_issue=false" >> $GITHUB_OUTPUT + echo "has_issue=false" >> "$GITHUB_OUTPUT" fi if echo "$PR_BODY" | grep -q "Solution description"; then - echo "has_solution=true" >> $GITHUB_OUTPUT + echo "has_solution=true" >> "$GITHUB_OUTPUT" else - echo "has_solution=false" >> $GITHUB_OUTPUT + echo "has_solution=false" >> "$GITHUB_OUTPUT" fi if echo "$PR_BODY" | grep -q "Steps to test"; then - echo "has_test_steps=true" >> $GITHUB_OUTPUT + echo "has_test_steps=true" >> "$GITHUB_OUTPUT" else - echo "has_test_steps=false" >> $GITHUB_OUTPUT + echo "has_test_steps=false" >> "$GITHUB_OUTPUT" fi - name: Generate PR Validation Summary @@ -107,6 +104,7 @@ jobs: const issueFound = '${{ steps.check_issue.outputs.issue_found }}' === 'true'; const issueKey = '${{ steps.check_issue.outputs.issue_key }}'; const issueType = '${{ steps.check_issue.outputs.issue_type }}'; + const hasIssue = '${{ steps.check_description.outputs.has_issue }}' === 'true'; const hasSolution = '${{ steps.check_description.outputs.has_solution }}' === 'true'; const hasTestSteps = '${{ steps.check_description.outputs.has_test_steps }}' === 'true'; @@ -124,9 +122,9 @@ jobs: | Solution Description | ${hasSolution ? '✅ Present' : '⚠️ Missing'} | | Test Steps | ${hasTestSteps ? '✅ Present' : '⚠️ Missing'} | - ${issueFound && hasSolution && hasTestSteps ? '✅ All checks passed!' : '⚠️ Some optional sections are missing. Consider adding them for better review context.'} + ${issueFound && hasSolution && hasTestSteps + ? '✅ All checks passed!' + : '⚠️ Some optional sections are missing.'} `; - await core.summary - .addRaw(summary) - .write(); + await core.summary.addRaw(summary).write(); diff --git a/CHANGELOG.MD b/CHANGELOG.MD index a49c9ab..25c070d 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -13,6 +13,19 @@ _released 12-01-2025 ### Fixed - Added new BDD/Gherkin parser command parse_bdd for behavioral driven development-related testing +### Added + - **BDD Support for parse_junit**: Added `--special-parser bdd` option to group multiple JUnit scenarios into a single TestRail BDD test case; supports case ID extraction, BDD case validation and result aggregation + +## [1.12.5] + +_released 12-09-2025 + +### Fixed + - Added a new option --parallel-pagination for handling large test result uploads with optimized processing and improved error handling. + - Fixed an issue where adding labels to project fails using label add command + - Fixed an issue where failed attachment upload errors (e.g. due to file size being too large) is not displayed in standard output. 
+ - Fixed an issue where test cases are deleted in existing test runs with configs in a test plan + ## [1.12.4] _released 11-03-2025 diff --git a/README.md b/README.md index adda450..40b5abe 100644 --- a/README.md +++ b/README.md @@ -33,22 +33,25 @@ trcli ``` You should get something like this: ``` -TestRail CLI v1.13.0 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) - - parse_bdd: Gherkin .feature files + - parse_cucumber: Cucumber JSON results (BDD) + - import_gherkin: Upload .feature files to TestRail BDD + - export_gherkin: Export BDD test cases as .feature files - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - - add_run: Create a new empty test run + - add_run: Create a new test run - labels: Manage labels (add, update, delete, list) + - references: Manage references (cases and runs) ``` CLI general reference -------- ```shell $ trcli --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli [OPTIONS] COMMAND [ARGS]... @@ -80,13 +83,18 @@ Options: 'username:password'. --noproxy Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1). + --parallel-pagination Enable parallel pagination for faster case fetching + (experimental). --help Show this message and exit. Commands: add_run Add a new test run in TestRail + export_gherkin Export BDD test case from TestRail as .feature file + import_gherkin Upload Gherkin .feature file to TestRail labels Manage labels in TestRail + parse_cucumber Parse Cucumber JSON results and upload to TestRail + parse_gherkin Parse Gherkin .feature file locally parse_junit Parse JUnit report and upload results to TestRail - parse_bdd Parse Gherkin .feature files and upload results to TestRail parse_openapi Parse OpenAPI spec and create cases in TestRail parse_robot Parse Robot Framework report and upload results to TestRail references Manage references in TestRail @@ -323,6 +331,380 @@ Assigning failed results: 3/3, Done. Submitted 25 test results in 2.1 secs. ``` +## Behavior-Driven Development (BDD) Support + +The TestRail CLI provides comprehensive support for Behavior-Driven Development workflows using Gherkin syntax. The BDD features enable you to manage test cases written in Gherkin format, execute BDD tests with various frameworks (Cucumber, Behave, pytest-bdd, etc.), and seamlessly upload results to TestRail. 
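+
+For orientation, a minimal end-to-end sketch of the most common workflow is shown below: run your BDD suite so it emits Cucumber-style JSON, then push the results to TestRail with `parse_cucumber`. The host, credentials, project, and file names are placeholders, and every option used here is described in detail in the sections that follow.
+
+```shell
+# 1. Execute your BDD suite with your framework of choice (Cucumber, Behave,
+#    pytest-bdd, ...) so that it writes Cucumber-style JSON, e.g. cucumber-results.json.
+
+# 2. Upload the results; -y answers prompts automatically and creates missing test cases.
+#    Host, project, and credentials below are placeholders for your own instance.
+$ trcli -y \
+  --host https://yourinstance.testrail.io \
+  --project "Your Project" \
+  --username user@example.com \
+  --password "********" \
+  parse_cucumber -f cucumber-results.json \
+  --title "BDD Test Run"
+```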
+ +### BDD Commands Overview + +The TestRail CLI provides four commands for complete BDD workflow management: + +| Command | Purpose | Use Case | +|---------|---------|----------| +| `import_gherkin` | Import .feature files to create test cases | Create BDD test cases in TestRail from existing .feature files | +| `export_gherkin` | Export test cases as .feature files | Extract test cases from TestRail for automation | +| `parse_cucumber` | Parse Cucumber JSON and upload results | Upload test results from Cucumber/Behave/pytest-bdd execution | +| `parse_gherkin` | Parse .feature files locally (no upload) | Validate syntax, convert to JSON, preview TestRail structure | + +### Uploading Cucumber/BDD Test Results + +The `parse_cucumber` command allows you to upload automated test results from BDD frameworks that generate Cucumber JSON format, including: +- **Cucumber (Java, JavaScript, Ruby)** +- **Behave (Python)** +- **pytest-bdd (Python)** +- **SpecFlow (.NET)** (with Cucumber JSON output) +- **Cucumber-JVM (Java)** + +#### Reference +```shell +$ trcli parse_cucumber --help +Usage: trcli parse_cucumber [OPTIONS] + + Parse Cucumber JSON results and upload to TestRail + +Options: + -f, --file Filename and path. + --close-run Close the newly created run + --title Title of Test Run to be created in TestRail. + --case-matcher Mechanism to match cases between the report and + TestRail. + --suite-id Suite ID to submit results to. [x>=1] + --suite-name Suite name to submit results to. + --run-id Run ID for the results they are reporting. [x>=1] + --plan-id Plan ID with which the Test Run will be associated. [x>=1] + --config-ids Comma-separated configuration IDs to use along with Test Plans. + --milestone-id Milestone ID to which the Test Run should be associated to. [x>=1] + --section-id Section ID to create new sections with test cases under. [x>=1] + --run-description Summary text to be added to the test run. + --case-fields List of case fields and values for new test cases creation. + --result-fields List of result fields and values for test results creation. + --allow-ms Allows using milliseconds for elapsed times. + --upload-feature Generate and upload .feature file to create/update test cases via BDD endpoint. + --feature-section-id Section ID for uploading .feature file (required if --upload-feature is used). [x>=1] + -v, --verbose Enable verbose logging output. + --help Show this message and exit. 
+``` + +#### Cucumber JSON Format Example +```json +[ + { + "id": "user-login", + "name": "User Login", + "description": "As a registered user\n\tI want to log in to the application\n\tSo that I can access my account", + "uri": "features/login.feature", + "elements": [ + { + "id": "user-login;successful-login-with-valid-credentials", + "name": "Successful login with valid credentials", + "type": "scenario", + "description": "", + "keyword": "Scenario", + "tags": [ + {"name": "@smoke"}, + {"name": "@authentication"} + ], + "steps": [ + { + "keyword": "Given ", + "name": "I have a valid username \"testuser\"", + "result": { + "status": "passed", + "duration": 1500000000 + } + }, + { + "keyword": "When ", + "name": "I enter my credentials", + "result": { + "status": "passed", + "duration": 500000000 + } + }, + { + "keyword": "Then ", + "name": "I should be redirected to the dashboard", + "result": { + "status": "passed", + "duration": 300000000 + } + } + ] + } + ] + } +] +``` + +**Mapping Cucumber JSON to TestRail entities:** + +| Cucumber JSON Element | TestRail Entity | Notes | +|----------------------|-----------------|-------| +| `feature` | Section | Feature name becomes section name | +| `scenario` / `scenario outline` | Test Case | Each scenario creates a test case | +| `step` | Test Step | Steps with results become step results | +| `tags` | Case Tags/Refs | Tags like @smoke, @C123 map to TestRail fields | + +#### Two Workflows for BDD Test Results + +##### Workflow 1: Upload Results Only (Code-First) + +Use this workflow when test cases already exist in TestRail and you want to match them using automation_id. + +```shell +# Upload results to existing test cases +$ trcli parse_cucumber -f cucumber-results.json \ + --project "Your Project" \ + --suite-id 2 \ + --title "BDD Test Run" \ + -n + +# With automation (auto-create test cases if missing) +$ trcli parse_cucumber -f cucumber-results.json \ + --project "Your Project" \ + --suite-id 2 \ + --title "BDD Test Run" \ + -y +``` + +**How it works:** +- Parser creates automation_id from feature name + scenario name +- TestRail CLI matches scenarios to existing cases via automation_id +- Results are uploaded to matched test cases +- With `-y`: Creates new test cases if no match found +- With `-n`: Skips scenarios without matching test cases + +##### Workflow 2: Create BDD Test Cases + Upload Results (Specification-First) + +Use this workflow to automatically create BDD test cases from Cucumber results using TestRail's BDD endpoint. + +```shell +# Create BDD test cases and upload results +$ trcli parse_cucumber -f cucumber-results.json \ + --project "Your Project" \ + --suite-id 2 \ + --upload-feature \ + --feature-section-id 123 \ + --title "BDD Test Run" \ + -y +``` + +**How it works:** +1. Parses Cucumber JSON results +2. Generates complete .feature files (one per feature) +3. Uploads .feature files to TestRail via `add_bdd` endpoint +4. TestRail creates BDD test cases with Gherkin content +5. Maps created case IDs to test results +6. Uploads all scenario results to their respective test cases +7. Sets automation_id on created test cases for future matching + +#### Case Matching for BDD Tests + +BDD test matching works similarly to JUnit, with automation_id generated from your test structure: + +**Automation ID Format:** +``` +. 
+<Feature Name>.<Scenario Name>
+``` + +**Example:** +``` +Feature: User Login + Scenario: Successful login with valid credentials + +Automation ID: User Login.Successful login with valid credentials +``` + +You can also use Case ID matching with `@C` tags: + +```gherkin +Feature: User Login + @C123 + Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid credentials + Then I should see the dashboard +``` + +### Importing Gherkin Feature Files + +The `import_gherkin` command allows you to upload BDD test cases in TestRail from existing .feature files. + +#### Reference +```shell +$ trcli import_gherkin --help +Usage: trcli import_gherkin [OPTIONS] + + Import Gherkin .feature file to create BDD test cases in TestRail + +Options: + -f, --file Path to .feature file to import [required] + --section-id Section ID where test cases will be created [x>=1] [required] + -v, --verbose Enable verbose logging output + --help Show this message and exit. +``` + +#### Usage Example +```shell +# Import a single feature file +$ trcli import_gherkin -f features/login.feature \ + --project "Your Project" \ + --section-id 456 \ + -y + +# Import with custom project settings +$ trcli import_gherkin -f features/checkout.feature \ + --project-id 10 \ + --section-id 789 \ + -v -y +``` + +**How it works:** +1. Reads the .feature file +2. Uploads to TestRail via `add_bdd` endpoint +3. TestRail creates test case(s) with complete Gherkin content +4. Returns created case ID(s) + +**Example .feature file:** +```gherkin +Feature: User Login + As a registered user + I want to log in to the application + So that I can access my account + + Background: + Given the application is running + And I am on the login page + + @smoke @authentication + Scenario: Successful login with valid credentials + Given I have a valid username "testuser" + And I have a valid password "password123" + When I enter my credentials + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message "Welcome, testuser" + + @negative @authentication + Scenario: Failed login with invalid password + Given I have a valid username "testuser" + And I have an invalid password "wrongpassword" + When I enter my credentials + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page +``` + +### Exporting BDD Test Cases + +The `export_gherkin` command allows you to export existing BDD test cases from TestRail as .feature files. + +#### Reference +```shell +$ trcli export_gherkin --help +Usage: trcli export_gherkin [OPTIONS] + + Export BDD test case from TestRail as .feature file + +Options: + --case-id TestRail test case ID to export [x>=1] [required] + --output Output path for the .feature file (prints to stdout if not specified) + -v, --verbose Enable verbose logging output + --help Show this message and exit. +``` + +#### Usage Examples +```shell +# Export to stdout +$ trcli export_gherkin --case-id 123 \ + --project "Your Project" + +# Export to file +$ trcli export_gherkin --case-id 123 \ + --project "Your Project" \ + --output features/exported-login.feature + +# Export with verbose logging +$ trcli export_gherkin --case-id 456 \ + --project-id 10 \ + --output features/checkout.feature \ + -v +``` + +**Output example:** +``` +Connecting to TestRail... +Retrieving BDD test case 123... 
+ +✓ Successfully exported test case 123 + File: features/exported-login.feature + Size: 1247 characters +``` + +**Use cases:** +- Extract test cases for automation +- Synchronize TestRail with version control +- Generate documentation from test cases +- Migrate test cases between projects + +### Parsing Gherkin Feature Files Locally + +The `parse_gherkin` command parses Gherkin .feature files locally and converts them into TestRail data structure format without uploading to TestRail. This is useful for validation, conversion, or integration with custom workflows. + +#### Reference +```shell +$ trcli parse_gherkin --help +Usage: trcli parse_gherkin [OPTIONS] + + Parse Gherkin .feature file locally + + This command parses Gherkin/BDD .feature files and converts them into + TestRail data structure format without uploading to TestRail. + +Options: + -f, --file Path to Gherkin .feature file to parse [required] + --output Optional output file path to save parsed JSON + --pretty Pretty print JSON output with indentation + --help Show this message and exit. +``` + +#### Usage Examples +```shell +# Parse a feature file and output to console +$ trcli parse_gherkin -f features/login.feature + +# Parse and save to JSON file with pretty formatting +$ trcli parse_gherkin -f features/login.feature \ + --output parsed-output.json \ + --pretty + +# Parse multiple feature files +$ trcli parse_gherkin -f features/checkout.feature \ + --output checkout.json \ + --pretty +``` + +**Use cases:** +- Validate Gherkin syntax locally before uploading +- Convert .feature files to TestRail JSON format +- Preview how features will be structured in TestRail +- Integrate with custom automation workflows +- Debug feature file parsing issues + +### BDD Mapping to TestRail + +When using parse_cucumber with `--upload-feature`, the following mapping rules apply: + +| Gherkin Element | TestRail Field | Description | +|----------------|----------------|-------------| +| `Feature:` name + description | Test Case title + Preconditions | Feature metadata becomes test case info | +| `Background:` | BDD Scenario field | Shared setup steps | +| `Scenario:` / `Scenario Outline:` | BDD Scenario field | Individual test scenarios | +| `Given`/`When`/`Then`/`And`/`But` | BDD Scenario field | Test steps with keywords | +| `Examples:` table | BDD Scenario field | Data table for scenario outlines | +| `@tags` | References/BDD fields | Tags become references (e.g., @JIRA-123) | +| `@C` tags | Case ID | Map to existing test cases (e.g., @C456) | + ### Exploring other features #### General features @@ -1096,7 +1478,7 @@ Options: ### Reference ```shell $ trcli add_run --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli add_run [OPTIONS] @@ -1220,7 +1602,7 @@ providing you with a solid base of test cases, which you can further expand on T ### Reference ```shell $ trcli parse_openapi --help -TestRail CLI v1.12.4 +TestRail CLI v1.12.5 Copyright 2025 Gurock Software GmbH - www.gurock.com Usage: trcli parse_openapi [OPTIONS] @@ -1343,6 +1725,50 @@ Average time for uploading: - 2000 test cases was around 460 seconds - 5000 test cases was around 1000 seconds +### Parallel Pagination (Experimental) + +The TestRail CLI includes an experimental `--parallel-pagination` option that significantly improves performance when fetching large numbers of test cases from TestRail. 
This feature uses parallel fetching to retrieve multiple pages of results concurrently, rather than fetching them sequentially. + +#### When to Use Parallel Pagination + +Use `--parallel-pagination` when: +- Working with projects that have thousands of test cases +- Fetching test cases takes a long time during operations +- You need faster case matching and validation during result uploads + +#### How It Works + +When enabled, parallel pagination: +1. Fetches the first page to determine total pages available +2. Uses a thread pool (default: 10 workers set by `MAX_WORKERS_PARALLEL_PAGINATION` in `trcli/settings.py`) to fetch remaining pages concurrently +3. Automatically handles batching to avoid overwhelming the server +4. Combines all results efficiently for processing + +#### Usage + +Enable parallel pagination by adding the `--parallel-pagination` flag to any command: + +```shell +# Enable parallel pagination for faster case fetching during result upload +$ trcli parse_junit -f results.xml --parallel-pagination \ + --host https://yourinstance.testrail.io --username --password \ + --project "Your Project" + +# Example with parse_robot +$ trcli parse_robot -f output.xml --parallel-pagination \ + --host https://yourinstance.testrail.io --username --password \ + --project "Your Project" +``` + +You can also enable this feature globally by setting `ENABLE_PARALLEL_PAGINATION = True` in `trcli/settings.py`. The CLI flag takes precedence over the settings file. + +#### Performance Considerations + +- This feature is most beneficial when dealing with large test case repositories (1000+ cases) +- The default worker count is set to 10, which provides a good balance between speed and server load +- For smaller projects with few test cases, the performance improvement may be negligible +- This is an experimental feature - please report any issues you encounter + Contributing ------------ diff --git a/quick_test_gherkin.py b/quick_test_gherkin.py new file mode 100644 index 0000000..b7dbe53 --- /dev/null +++ b/quick_test_gherkin.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +"""Quick test script to parse any .feature file""" + +import sys +from pathlib import Path +from gherkin.parser import Parser +from gherkin.token_scanner import TokenScanner +import json + + +def parse_feature(filepath): + """Parse a Gherkin feature file and display key information.""" + with open(filepath, "r", encoding="utf-8") as f: + feature_text = f.read() + + parser = Parser() + scanner = TokenScanner(feature_text) + doc = parser.parse(scanner) + + feature = doc["feature"] + + print(f"\n{'='*60}") + print(f"Feature: {feature['name']}") + print(f"{'='*60}") + + # Count elements + scenarios = [c for c in feature["children"] if "scenario" in c] + backgrounds = [c for c in feature["children"] if "background" in c] + + print(f"\nSummary:") + print(f" Backgrounds: {len(backgrounds)}") + print(f" Scenarios: {len(scenarios)}") + + print(f"\nScenarios:") + for idx, child in enumerate(scenarios, 1): + scenario = child["scenario"] + tags = [tag["name"] for tag in scenario.get("tags", [])] + steps = scenario.get("steps", []) + examples = scenario.get("examples", []) + + scenario_type = "Scenario Outline" if examples else "Scenario" + print(f" {idx}. 
[{scenario_type}] {scenario['name']}") + print(f" Tags: {', '.join(tags) if tags else 'None'}") + print(f" Steps: {len(steps)}") + if examples: + total_examples = sum(len(ex.get("tableBody", [])) for ex in examples) + print(f" Example rows: {total_examples}") + + return doc + + +if __name__ == "__main__": + if len(sys.argv) > 1: + feature_file = Path(sys.argv[1]) + else: + # Default to sample file + feature_file = Path(__file__).parent / "tests" / "test_data" / "FEATURE" / "sample_login.feature" + + if not feature_file.exists(): + print(f"Error: File not found: {feature_file}") + sys.exit(1) + + parse_feature(feature_file) + print(f"\n{'='*60}") + print("✓ Parsing successful!") + print(f"{'='*60}\n") diff --git a/setup.py b/setup.py index 8ca4a94..7b98ee0 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ ], include_package_data=True, install_requires=[ - "click>=8.1.0,<8.2.2", # Note: click version 8.2.2 is yanked as of Aug 2, 2025! + "click>=8.1.0,<8.2.2", # Note: click version 8.2.2 is yanked as of Aug 2, 2025! "pyyaml>=6.0.0,<7.0.0", "junitparser>=3.1.0,<4.0.0", "pyserde==0.12.*", @@ -24,7 +24,8 @@ "humanfriendly>=10.0.0,<11.0.0", "openapi-spec-validator>=0.5.0,<1.0.0", "beartype>=0.17.0,<1.0.0", - "prance" # Does not use semantic versioning + "prance", # Does not use semantic versioning + "gherkin-official>=27.0.0,<28.0.0", # Gherkin/BDD feature file parser ], entry_points=""" [console_scripts] diff --git a/tests/pytest.ini b/tests/pytest.ini index 7cb31ac..9d5f3be 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -12,4 +12,9 @@ markers = data_provider: tests for data provider project_based_client: mark a test as a project-based client test. proxy: test for proxy feature - \ No newline at end of file + cmd_import_gherkin: tests for import_gherkin command + cmd_export_gherkin: tests for export_gherkin command + cmd_parse_gherkin: tests for parse_gherkin command + cmd_parse_cucumber: tests for parse_cucumber command + parse_gherkin: tests for gherkin parser + parse_cucumber: tests for cucumber parser diff --git a/tests/test_api_request_handler.py b/tests/test_api_request_handler.py index 4f17c37..88877e6 100644 --- a/tests/test_api_request_handler.py +++ b/tests/test_api_request_handler.py @@ -23,15 +23,15 @@ def _make_handler(verify=False, custom_json=None): environment.batch_size = 10 environment.case_matcher = MatchersParser.AUTO if custom_json is None: - json_path = ( - Path(__file__).parent / "test_data/json/api_request_handler.json" - ) + json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" else: json_path = custom_json file_json = open(json_path) json_string = json.dumps(json.load(file_json)) test_input = from_json(TestRailSuite, json_string) api_request = ApiRequestHandler(environment, api_client, test_input, verify) + # Clear cache for each test to ensure isolation + api_request._cache.clear() return api_request return _make_handler @@ -39,27 +39,32 @@ def _make_handler(verify=False, custom_json=None): @pytest.fixture(scope="function") def api_request_handler(handler_maker): - yield handler_maker() + handler = handler_maker() + yield handler + # Clean up cache after test + handler._cache.clear() @pytest.fixture(scope="function") def api_request_handler_verify(handler_maker): - yield handler_maker(verify=True) + handler = handler_maker(verify=True) + yield handler + # Clean up cache after test + handler._cache.clear() @pytest.fixture(scope="function") def api_request_handler_update_case_json(handler_maker): - json_path = ( - 
Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" - ) - yield handler_maker(custom_json=json_path, verify=False) + json_path = Path(__file__).parent / "test_data/json/update_case_result_single_with_id.json" + handler = handler_maker(custom_json=json_path, verify=False) + yield handler + # Clean up cache after test + handler._cache.clear() class TestApiRequestHandler: @pytest.mark.api_handler - def test_return_project( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = { "offset": 0, "limit": 250, @@ -72,7 +77,7 @@ def test_return_project( {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, {"id": 3, "name": "DataHub", "suite_mode": 1}, - ] + ], } requests_mock.get(create_url("get_projects"), json=mocked_response) assert api_request_handler.get_project_data("Test Project") == ProjectData( @@ -107,9 +112,7 @@ def test_return_project( ), "Get project should return proper project data object" @pytest.mark.api_handler - def test_return_project_legacy_response( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_legacy_response(self, api_request_handler: ApiRequestHandler, requests_mock): mocked_response = [ {"id": 1, "name": "DataHub", "suite_mode": 1}, {"id": 2, "name": "Test Project", "suite_mode": 1}, @@ -131,15 +134,15 @@ def test_return_project_legacy_response_with_buggy_authentication_prefix( {"id": 3, "name": "DataHub", "suite_mode": 1}, ] - requests_mock.get(create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n"+json.dumps(mocked_response)) + requests_mock.get( + create_url("get_projects"), text=f"USER AUTHENTICATION SUCCESSFUL!\n" + json.dumps(mocked_response) + ) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=2, suite_mode=1, error_message="" ), "Get project should return proper project data object" @pytest.mark.api_handler - def test_check_suite_exists( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_check_suite_exists(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, @@ -155,7 +158,7 @@ def test_check_suite_exists( False, FAULT_MAPPING["missing_suite"].format(suite_id=6), ), "Given suite id should NOT exist in mocked response." - + @pytest.mark.api_handler def test_check_suite_exists_with_pagination(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 @@ -167,7 +170,7 @@ def test_check_suite_exists_with_pagination(self, api_request_handler: ApiReques "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) @@ -207,16 +210,13 @@ def test_add_suite(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in add_suite" assert ( - api_request_handler.suites_data_from_provider.suite_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.suite_id == mocked_response["id"] ), "Added suite id in DataProvider doesn't match mocked response id." 
@pytest.mark.api_handler - def test_check_missing_sections_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -225,25 +225,19 @@ def test_check_missing_sections_true( "suite_id": 4, "name": "Skipped test", } - ] + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) - update_data_mock.assert_called_with( - section_data=[{'section_id': 0, 'suite_id': 4, 'name': 'Skipped test'}] - ) + update_data_mock.assert_called_with(section_data=[{"section_id": 0, "suite_id": 4, "name": "Skipped test"}]) assert missing, "There should be missing section" @pytest.mark.api_handler - def test_check_missing_sections_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_sections_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "_links": {"next": None, "prev": None}, "sections": [ @@ -256,19 +250,17 @@ def test_check_missing_sections_false( "id": 2, "suite_id": 4, "name": "Passed test", - } - ] + }, + ], } - requests_mock.get( - create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response - ) + requests_mock.get(create_url(f"get_sections/{project_id}&suite_id=4"), json=mocked_response) missing, _ = api_request_handler.check_missing_section_ids(project_id) update_data_mock.assert_called_with( section_data=[ - {'name': 'Skipped test', 'section_id': 1, 'suite_id': 4}, - {'name': 'Passed test', 'section_id': 2, 'suite_id': 4} + {"name": "Skipped test", "section_id": 1, "suite_id": 4}, + {"name": "Passed test", "section_id": 2, "suite_id": 4}, ] ) assert not missing, "There should be no missing section" @@ -282,9 +274,7 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc "name": "Passed test", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_sections(project_id) assert ( @@ -296,20 +286,17 @@ def test_add_sections(self, api_request_handler: ApiRequestHandler, requests_moc assert error == "", "Error occurred in add_section" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - == mocked_response["id"] + api_request_handler.suites_data_from_provider.testsections[1].section_id == mocked_response["id"] ), "Added section id in DataProvider doesn't match mocked response id." 
@pytest.mark.api_handler - def test_add_section_and_cases( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, "suite_id": 4, "name": "Passed test", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_1 = { @@ -317,7 +304,7 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } mocked_response_for_case_2 = { @@ -325,12 +312,10 @@ def test_add_section_and_cases( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "className.testCase" + "custom_automation_id": "className.testCase", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -371,9 +356,7 @@ def test_add_run(self, api_request_handler: ApiRequestHandler, requests_mock): requests_mock.post(create_url(f"add_run/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.add_run(project_id, run_name) - assert ( - mocked_response["id"] == resources_added - ), "Added run id doesn't match mocked response id" + assert mocked_response["id"] == resources_added, "Added run id doesn't match mocked response id" assert error == "", "Error occurred in add_case" @pytest.mark.api_handler @@ -394,40 +377,37 @@ def test_add_results(self, api_request_handler: ApiRequestHandler, requests_mock "version": "1.0RC1", } ] - requests_mock.post( - create_url(f"add_results_for_cases/{run_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_results_for_cases/{run_id}"), json=mocked_response) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 4, - 'case_id': 1, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 4, + "case_id": 1, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) attachments_mock_response = {"attachment_id": 123} - requests_mock.post( - create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response - ) + requests_mock.post(create_url(f"add_attachment_to_result/{result_id}"), json=attachments_mock_response) with patch("builtins.open", mock_open()) as mock_file: resources_added, error, results_added = api_request_handler.add_results(run_id) assert [mocked_response] == resources_added, "Invalid response from add_results" assert error == "", "Error occurred in add_results" - assert results_added == len(mocked_response), \ - f"Expected {len(mocked_response)} results to be added but got {results_added} instead." + assert results_added == len( + mocked_response + ), f"Expected {len(mocked_response)} results to be added but got {results_added} instead." 
mock_file.assert_any_call("./path1", "rb") mock_file.assert_any_call("./path2", "rb") @@ -446,12 +426,10 @@ def test_close_run(self, api_request_handler: ApiRequestHandler, requests_mock): assert error == "", "Error occurred in close_run" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_true( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_true(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": None, @@ -459,16 +437,14 @@ def test_check_missing_test_cases_ids_true( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } requests_mock.get( create_url(f"get_cases/{project_id}&suite_id={suite_id}"), json=mocked_response_page_1, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ @@ -476,27 +452,24 @@ def test_check_missing_test_cases_ids_true( "case_id": 1, "custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, - "custom_automation_id": - "Skipped test.testCase2", + "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" - } + "title": "testCase2", + }, ] ) assert missing_ids, "There is one missing test case" assert error == "", "Error occurred in check" @pytest.mark.api_handler - def test_check_missing_test_cases_ids_false( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_check_missing_test_cases_ids_false(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_id = api_request_handler.suites_data_from_provider.suite_id - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response_page_1 = { "_links": { "next": f"/api/v2/get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1", @@ -504,7 +477,7 @@ def test_check_missing_test_cases_ids_false( }, "cases": [ {"title": "testCase1", "custom_automation_id": "Skipped test.testCase1", "id": 1, "section_id": 1234}, - {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234} + {"title": "testCase2", "custom_automation_id": "Skipped test.testCase2", "id": 2, "section_id": 1234}, ], } mocked_response_page_2 = { @@ -521,29 +494,22 @@ def test_check_missing_test_cases_ids_false( create_url(f"get_cases/{project_id}&suite_id={suite_id}&limit=1&offset=1"), json=mocked_response_page_2, ) - missing_ids, error = api_request_handler.check_missing_test_cases_ids( - project_id - ) + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) update_data_mock.assert_called_with( case_data=[ { "case_id": 1, 
"custom_automation_id": "Skipped test.testCase1", "section_id": 1234, - "title": "testCase1" + "title": "testCase1", }, { "case_id": 2, "custom_automation_id": "Skipped test.testCase2", "section_id": 1234, - "title": "testCase2" + "title": "testCase2", }, - { - "case_id": 1, - "custom_automation_id": "Passed test.testCase3", - "section_id": 2, - "title": "testCase3" - } + {"case_id": 1, "custom_automation_id": "Passed test.testCase3", "section_id": 2, "title": "testCase3"}, ] ) assert not missing_ids, "No missing ids" @@ -560,35 +526,30 @@ def test_get_suite_ids(self, api_request_handler: ApiRequestHandler, requests_mo requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) resources_added, error = api_request_handler.get_suite_ids(project_id) assert ( - resources_added[0] == mocked_response[0]["id"] and - resources_added[1] == mocked_response[1]["id"] + resources_added[0] == mocked_response[0]["id"] and resources_added[1] == mocked_response[1]["id"] ), "ID in response doesn't match mocked response" @pytest.mark.api_handler - def test_get_suite_ids_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_get_suite_ids_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) - + + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) + suite_ids, error = api_request_handler.get_suite_ids(project_id) - + assert suite_ids == [], "Should return empty list on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." 
+ ), "Should return connection error message" @pytest.mark.api_handler - def test_resolve_suite_id_using_name( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_resolve_suite_id_using_name(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): project_id = 3 suite_name = "Suite2" api_request_handler.suites_data_from_provider.name = suite_name - update_data_mock = mocker.patch('trcli.api.api_request_handler.ApiDataProvider.update_data') + update_data_mock = mocker.patch("trcli.api.api_request_handler.ApiDataProvider.update_data") mocked_response = { "offset": 0, @@ -598,43 +559,36 @@ def test_resolve_suite_id_using_name( "suites": [ {"id": 4, "name": "Suite1", "description": "Test1", "project_id": 3}, {"id": 5, "name": "Suite2", "description": "Test2", "project_id": 3}, - ] + ], } - + requests_mock.get(create_url(f"get_suites/{project_id}"), json=mocked_response) - + suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) - + assert suite_id == 5, "Should return the correct suite ID for matching name with pagination" assert error == "", "Should have no error message" - + update_data_mock.assert_called_once_with([{"suite_id": 5, "name": "Suite2"}]) @pytest.mark.api_handler - def test_resolve_suite_id_using_name_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_resolve_suite_id_using_name_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 - requests_mock.get( - create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url(f"get_suites/{project_id}"), exc=requests.exceptions.ConnectTimeout) suite_id, error = api_request_handler.resolve_suite_id_using_name(project_id) assert suite_id == -1, "Should return -1 on API error" - assert error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." \ - " Please check your settings and try again.", "Should return connection error message" - + assert ( + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + " Please check your settings and try again." + ), "Should return connection error message" @pytest.mark.api_handler - def test_return_project_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_return_project_error(self, api_request_handler: ApiRequestHandler, requests_mock): - requests_mock.get( - create_url("get_projects"), exc=requests.exceptions.ConnectTimeout - ) + requests_mock.get(create_url("get_projects"), exc=requests.exceptions.ConnectTimeout) assert api_request_handler.get_project_data("Test Project") == ProjectData( project_id=-3, suite_mode=-1, @@ -643,9 +597,7 @@ def test_return_project_error( ), "Get project should return proper project data object with error" @pytest.mark.api_handler - def test_add_suite_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_suite_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 api_request_handler.suites_data_from_provider.suite_id = None @@ -658,15 +610,12 @@ def test_add_suite_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_sections_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_sections_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 requests_mock.post( create_url(f"add_section/{project_id}"), @@ -676,20 +625,16 @@ def test_add_sections_error( assert resources_added == [], "No resources should be added" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert ( - api_request_handler.suites_data_from_provider.testsections[1].section_id - is None + api_request_handler.suites_data_from_provider.testsections[1].section_id is None ), "No resources should be added to DataProvider" @pytest.mark.api_handler - def test_add_section_and_cases_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_section_and_cases_error(self, api_request_handler: ApiRequestHandler, requests_mock): project_id = 3 mocked_response_for_section = { "id": 12345, @@ -702,7 +647,7 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 1234, "title": "testCase2", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } mocked_response_for_case_2 = { @@ -710,12 +655,10 @@ def test_add_section_and_cases_error( "suite_id": 4, "section_id": 12345, "title": "testCase3", - "custom_automation_id": "Passed test.testCase3" + "custom_automation_id": "Passed test.testCase3", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response_for_section - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response_for_section) requests_mock.post( create_url(f"add_case/{mocked_response_for_case_1['section_id']}"), json=mocked_response_for_case_1, @@ -735,65 +678,55 @@ def test_add_section_and_cases_error( mocked_response_for_case_1["id"], ], "Added case id doesn't match mocked response id" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." 
), "Connection error is expected" @pytest.mark.api_handler - def test_add_results_error( - self, api_request_handler: ApiRequestHandler, requests_mock - ): + def test_add_results_error(self, api_request_handler: ApiRequestHandler, requests_mock): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) tests_mocked_response = { - 'offset': 0, - 'limit': 250, - 'size': 4, - '_links': {'next': None, 'prev': None}, - 'tests': [ + "offset": 0, + "limit": 250, + "size": 4, + "_links": {"next": None, "prev": None}, + "tests": [ { - 'id': 18319, - 'case_id': 6086, - 'status_id': 5, - 'assignedto_id': None, - 'run_id': run_id, - 'title': 'Fail To Login With Invalid Password' - } - ] + "id": 18319, + "case_id": 6086, + "status_id": 5, + "assignedto_id": None, + "run_id": run_id, + "title": "Fail To Login With Invalid Password", + } + ], } requests_mock.get(create_url(f"get_tests/{run_id}"), json=tests_mocked_response) resources_added, error, results_added = api_request_handler.add_results(run_id) assert resources_added == [], "Expected empty list of added resources" assert ( - error - == "Your upload to TestRail did not receive a successful response from your TestRail Instance." + error == "Your upload to TestRail did not receive a successful response from your TestRail Instance." " Please check your settings and try again." ), "Connection error is expected" assert results_added == 0, "Expected 0 resources to be added." @pytest.mark.api_handler - def test_add_results_keyboard_interrupt( - self, api_request_handler: ApiRequestHandler, requests_mock, mocker - ): + def test_add_results_keyboard_interrupt(self, api_request_handler: ApiRequestHandler, requests_mock, mocker): run_id = 3 requests_mock.post( create_url(f"add_results_for_cases/{run_id}"), exc=requests.exceptions.ConnectTimeout, ) - mocker.patch( - "trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt - ) + mocker.patch("trcli.api.api_request_handler.as_completed", side_effect=KeyboardInterrupt) with pytest.raises(KeyboardInterrupt) as exception: api_request_handler.add_results(run_id) @pytest.mark.api_handler - def test_add_suite_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_suite_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): project_id = 3 mocked_response = { "description": "..", @@ -818,9 +751,7 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): "description": "Some description", } - requests_mock.post( - create_url(f"add_section/{project_id}"), json=mocked_response - ) + requests_mock.post(create_url(f"add_section/{project_id}"), json=mocked_response) api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert error == "", "There should be no error in verification." @@ -828,45 +759,34 @@ def test_add_section_with_verify(self, handler_maker, requests_mock): api_request_handler_verify = handler_maker(verify=True) resources_added, error = api_request_handler_verify.add_sections(project_id) assert ( - error - == "Data verification failed. Server added different resource than expected." + error == "Data verification failed. Server added different resource than expected." ), "There should be error in verification." 
@pytest.mark.api_handler - def test_add_case_with_verify( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_add_case_with_verify(self, api_request_handler_verify: ApiRequestHandler, requests_mock): mocked_response_for_case = { "id": 3, "suite_id": 4, "section_id": 1234, "title": "testCase2", "estimate": "30s", - "custom_automation_id": "Skipped test.testCase2" + "custom_automation_id": "Skipped test.testCase2", } requests_mock.post( create_url(f"add_case/{mocked_response_for_case['section_id']}"), json=mocked_response_for_case, ) - del api_request_handler_verify.suites_data_from_provider.testsections[ - 1 - ].testcases[0] + del api_request_handler_verify.suites_data_from_provider.testsections[1].testcases[0] resources_added, error = api_request_handler_verify.add_cases() assert error == "", "There should be no error in verification." mocked_response_for_case["estimate"] = "60s" - api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[ - 1 - ].case_id = None + api_request_handler_verify.suites_data_from_provider.testsections[0].testcases[1].case_id = None resources_added, error = api_request_handler_verify.add_cases() - assert ( - error == FAULT_MAPPING["data_verification_error"] - ), "There should be error in verification." + assert error == FAULT_MAPPING["data_verification_error"], "There should be error in verification." @pytest.mark.api_handler - def test_delete_section( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_section(self, api_request_handler_verify: ApiRequestHandler, requests_mock): sections_id = [{"section_id": 1}] mocked_response_for_case = {"success": 200} @@ -879,9 +799,7 @@ def test_delete_section( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_suite( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_suite(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 mocked_response_for_case = {"success": 200} @@ -894,9 +812,7 @@ def test_delete_suite( assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_cases( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_cases(self, api_request_handler_verify: ApiRequestHandler, requests_mock): suite_id = 1 cases = [{"case_id": 1}] mocked_response_for_case = {"success": 200} @@ -906,15 +822,11 @@ def test_delete_cases( json=mocked_response_for_case, ) - resources_added, error = api_request_handler_verify.delete_cases( - suite_id, cases - ) + resources_added, error = api_request_handler_verify.delete_cases(suite_id, cases) assert error == "", "There should be no error in verification." @pytest.mark.api_handler - def test_delete_run( - self, api_request_handler_verify: ApiRequestHandler, requests_mock - ): + def test_delete_run(self, api_request_handler_verify: ApiRequestHandler, requests_mock): run_id = 1 mocked_response_for_case = {"success": 200} @@ -925,3 +837,433 @@ def test_delete_run( resources_added, error = api_request_handler_verify.delete_run(run_id) assert error == "", "There should be no error in verification." 
+ + @pytest.mark.api_handler + def test_add_bdd_success(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test successful .feature file upload via add_bdd endpoint""" + section_id = 123 + feature_content = "@smoke\nFeature: User Login\n Scenario: Successful login" + + # API returns standard TestRail test case JSON with 'id' field + # File upload uses multipart/form-data + mocked_response = {"id": 101, "title": "Successful login", "section_id": 123, "template_id": 1} + + requests_mock.post( + create_url(f"add_bdd/{section_id}"), + json=mocked_response, + ) + + case_ids, error = api_request_handler.add_bdd(section_id, feature_content) + + assert case_ids == [101], "Should return list with single case ID" + assert error == "", "There should be no error" + + @pytest.mark.api_handler + def test_get_bdd_success(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test successful .feature file retrieval via get_bdd endpoint""" + case_id = 456 + expected_feature = "@smoke\nFeature: User Login" + + # API returns raw Gherkin text + mocked_response = expected_feature + + requests_mock.get( + create_url(f"get_bdd/{case_id}"), + text=mocked_response, + ) + + feature_content, error = api_request_handler.get_bdd(case_id) + + assert feature_content == expected_feature, "Should return feature content" + assert error == "", "There should be no error" + + @pytest.mark.api_handler + def test_update_run_with_include_all_false_standalone(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test update_run for standalone run with include_all=false""" + run_id = 100 + run_name = "Updated Test Run" + + # Mock get_run response - standalone run (no plan_id), include_all=false + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": None, + "config_ids": [], + } + + # Mock get_tests response - existing cases in run + get_tests_response = { + "offset": 0, + "limit": 250, + "size": 2, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1, "case_id": 1, "status_id": 1}, {"id": 2, "case_id": 2, "status_id": 1}], + } + + # Mock update_run response + update_run_response = {"id": run_id, "name": run_name} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response) + requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["id"] == run_id, "Run ID should match" + + # Verify the payload sent to update_run + request_history = requests_mock.request_history + update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0] + payload = update_request.json() + + assert payload["include_all"] == False, "include_all should be False" + assert "case_ids" in payload, "case_ids should be present" + # Should contain union of existing (1, 2) and report cases + assert set(payload["case_ids"]) >= {1, 2}, "Should include existing case IDs" + + @pytest.mark.api_handler + def test_update_run_with_include_all_false_plan_with_config( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + """Test update_run for run in plan with config and include_all=false (the bug scenario)""" + run_id = 200 + run_name = "Updated Test Run in Plan" + + # Mock get_run 
response - run in plan with config, include_all=false + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": 10, + "config_ids": [5, 6], # Has configs - will use update_run_in_plan_entry + } + + # Mock get_tests response - existing cases + get_tests_response = { + "offset": 0, + "limit": 250, + "size": 3, + "_links": {"next": None, "prev": None}, + "tests": [ + {"id": 1, "case_id": 188, "status_id": 1}, + {"id": 2, "case_id": 180, "status_id": 1}, + {"id": 3, "case_id": 191, "status_id": 1}, + ], + } + + # Mock update_run_in_plan_entry response + update_run_response = {"id": run_id, "name": run_name} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response) + requests_mock.post(create_url(f"update_run_in_plan_entry/{run_id}"), json=update_run_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["id"] == run_id, "Run ID should match" + + # Verify the payload sent to update_run_in_plan_entry + request_history = requests_mock.request_history + update_request = [r for r in request_history if "update_run_in_plan_entry" in r.url][0] + payload = update_request.json() + + # THIS IS THE CRITICAL FIX - must include include_all=False + assert payload["include_all"] == False, "include_all must be False (fixes the bug)" + assert "case_ids" in payload, "case_ids should be present" + # Should contain union of existing (188, 180, 191) and report cases + assert set(payload["case_ids"]) >= {188, 180, 191}, "Should preserve existing case IDs" + + @pytest.mark.api_handler + def test_update_run_with_include_all_true_preserves_setting( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + """Test update_run preserves include_all=true and doesn't send case_ids""" + run_id = 300 + run_name = "Updated Run with Include All" + + # Mock get_run response - include_all=true + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": True, # Run includes all cases + "plan_id": None, + "config_ids": [], + } + + # Mock update_run response + update_run_response = {"id": run_id, "name": run_name, "include_all": True} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.post(create_url(f"update_run/{run_id}"), json=update_run_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["include_all"] == True, "include_all should be preserved" + + # Verify the payload sent to update_run + request_history = requests_mock.request_history + update_request = [r for r in request_history if "update_run" in r.url and r.method == "POST"][0] + payload = update_request.json() + + assert payload["include_all"] == True, "include_all should be True" + assert "case_ids" not in payload, "case_ids should NOT be present when include_all=True" + + @pytest.mark.api_handler + def test_update_run_handles_get_tests_error(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test update_run handles errors from get_tests gracefully""" + run_id = 400 + run_name = "Test Run" + + # Mock get_run response - include_all=false + get_run_response = { + 
"id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": None, + "config_ids": [], + } + + # Mock get_tests to return error (403 Forbidden, for example) + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), status_code=403, json={"error": "Access denied"}) + + # Execute update_run - should fail gracefully + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert run_data is None, "run_data should be None on error" + assert error is not None, "Error message should be present" + assert "Failed to get tests in run" in error, "Error should indicate get_tests failure" + + @pytest.mark.api_handler + def test_update_run_with_include_all_false_plan_without_config( + self, api_request_handler: ApiRequestHandler, requests_mock + ): + """Test update_run for run in plan without config uses update_plan_entry""" + run_id = 500 + run_name = "Updated Test Run in Plan No Config" + plan_id = 20 + entry_id = "abc-123" + + # Mock get_run response - run in plan without config + get_run_response = { + "id": run_id, + "name": "Original Run", + "description": "Original description", + "refs": "REF-1", + "include_all": False, + "plan_id": plan_id, + "config_ids": [], # No configs - will use update_plan_entry + } + + # Mock get_tests response + get_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1, "case_id": 50, "status_id": 1}], + } + + # Mock get_plan response + get_plan_response = { + "id": plan_id, + "entries": [{"id": entry_id, "runs": [{"id": run_id, "entry_id": entry_id}]}], + } + + # Mock update_plan_entry response + update_plan_response = {"id": run_id, "name": run_name} + + requests_mock.get(create_url(f"get_run/{run_id}"), json=get_run_response) + requests_mock.get(create_url(f"get_tests/{run_id}"), json=get_tests_response) + requests_mock.get(create_url(f"get_plan/{plan_id}"), json=get_plan_response) + requests_mock.post(create_url(f"update_plan_entry/{plan_id}/{entry_id}"), json=update_plan_response) + + # Execute update_run + run_data, error = api_request_handler.update_run(run_id, run_name) + + # Assertions + assert error == "", "No error should occur" + assert run_data["id"] == run_id, "Run ID should match" + + # Verify update_plan_entry was called with correct payload + request_history = requests_mock.request_history + update_request = [r for r in request_history if f"update_plan_entry/{plan_id}/{entry_id}" in r.url][0] + payload = update_request.json() + + assert payload["include_all"] == False, "include_all should be False" + assert "case_ids" in payload, "case_ids should be present" + assert 50 in payload["case_ids"], "Should include existing case ID" + + @pytest.mark.api_handler + def test_upload_attachments_413_error(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path): + """Test that 413 errors (file too large) are properly reported.""" + run_id = 1 + + # Mock get_tests endpoint + mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1001, "case_id": 100}], + } + requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) + + # Create a temporary test file + test_file = tmp_path / "large_attachment.jpg" + test_file.write_text("test content") + + # Mock add_attachment_to_result endpoint to return 413 + 
requests_mock.post( + create_url("add_attachment_to_result/2001"), + status_code=413, + text='413 Request Entity Too Large\n\nRequest Entity Too Large
\n\n', + ) + + # Prepare test data + report_results = [{"case_id": 100, "attachments": [str(test_file)]}] + results = [{"id": 2001, "test_id": 1001}] + + # Call upload_attachments + api_request_handler.upload_attachments(report_results, results, run_id) + + # Verify the request was made (case-insensitive comparison) + assert requests_mock.last_request.url.lower() == create_url("add_attachment_to_result/2001").lower() + + @pytest.mark.api_handler + def test_upload_attachments_success(self, api_request_handler: ApiRequestHandler, requests_mock, tmp_path): + """Test that successful attachment uploads work correctly.""" + run_id = 1 + + # Mock get_tests endpoint + mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1001, "case_id": 100}], + } + requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) + + # Create a temporary test file + test_file = tmp_path / "test_attachment.jpg" + test_file.write_text("test content") + + # Mock add_attachment_to_result endpoint to return success + requests_mock.post(create_url("add_attachment_to_result/2001"), status_code=200, json={"attachment_id": 5001}) + + # Prepare test data + report_results = [{"case_id": 100, "attachments": [str(test_file)]}] + results = [{"id": 2001, "test_id": 1001}] + + # Call upload_attachments + api_request_handler.upload_attachments(report_results, results, run_id) + + # Verify the request was made (case-insensitive comparison) + assert requests_mock.last_request.url.lower() == create_url("add_attachment_to_result/2001").lower() + + @pytest.mark.api_handler + def test_upload_attachments_file_not_found(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test that missing attachment files are properly reported.""" + run_id = 1 + + # Mock get_tests endpoint + mocked_tests_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "tests": [{"id": 1001, "case_id": 100}], + } + requests_mock.get(create_url(f"get_tests/{run_id}"), json=mocked_tests_response) + + # Prepare test data with non-existent file + report_results = [{"case_id": 100, "attachments": ["/path/to/nonexistent/file.jpg"]}] + results = [{"id": 2001, "test_id": 1001}] + + # Call upload_attachments - should not raise exception + api_request_handler.upload_attachments(report_results, results, run_id) + + @pytest.mark.api_handler + def test_caching_reduces_api_calls(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test that caching reduces the number of API calls for repeated requests""" + mocked_response = { + "offset": 0, + "limit": 250, + "size": 2, + "_links": {"next": None, "prev": None}, + "projects": [ + {"id": 1, "name": "DataHub", "suite_mode": 1}, + {"id": 2, "name": "Test Project", "suite_mode": 1}, + ], + } + + # Set up mock + mock_get = requests_mock.get(create_url("get_projects"), json=mocked_response) + + # First call should hit the API + result1 = api_request_handler.get_project_data("Test Project") + assert result1.project_id == 2 + assert mock_get.call_count == 1, "First call should hit the API" + + # Second call should use cache + result2 = api_request_handler.get_project_data("Test Project") + assert result2.project_id == 2 + assert mock_get.call_count == 1, "Second call should use cache, not hit API again" + + # Third call with different name should still use cache (same endpoint) + result3 = api_request_handler.get_project_data("DataHub") + assert result3.project_id == 1 + 
assert mock_get.call_count == 1, "Third call should still use cached data" + + @pytest.mark.api_handler + def test_cache_stats(self, api_request_handler: ApiRequestHandler, requests_mock): + """Test that cache statistics are tracked correctly""" + mocked_response = { + "offset": 0, + "limit": 250, + "size": 1, + "_links": {"next": None, "prev": None}, + "projects": [{"id": 1, "name": "Test Project", "suite_mode": 1}], + } + + requests_mock.get(create_url("get_projects"), json=mocked_response) + + # Check initial stats + stats = api_request_handler._cache.get_stats() + assert stats["hit_count"] == 0 + assert stats["miss_count"] == 0 + assert stats["size"] == 0 + + # Make first call (cache miss) + api_request_handler.get_project_data("Test Project") + stats = api_request_handler._cache.get_stats() + assert stats["miss_count"] == 1 + assert stats["hit_count"] == 0 + assert stats["size"] == 1 + + # Make second call (cache hit) + api_request_handler.get_project_data("Test Project") + stats = api_request_handler._cache.get_stats() + assert stats["miss_count"] == 1 + assert stats["hit_count"] == 1 + assert stats["hit_rate"] == 50.0 # 1 hit out of 2 total requests diff --git a/tests/test_api_request_handler_case_matcher.py b/tests/test_api_request_handler_case_matcher.py new file mode 100644 index 0000000..6d4bb2f --- /dev/null +++ b/tests/test_api_request_handler_case_matcher.py @@ -0,0 +1,555 @@ +""" +Unit tests for NAME matcher optimization that skips fetching all cases. + +Tests the performance optimization introduced to avoid downloading 165k+ cases +when using NAME or PROPERTY matcher, which only need to validate specific case IDs. +""" + +import pytest +from unittest.mock import patch, MagicMock, call +from pathlib import Path +import json +from serde.json import from_json + +from tests.helpers.api_client_helpers import TEST_RAIL_URL, create_url +from trcli.cli import Environment +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.api.api_client import APIClient, APIClientResult +from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailSection, TestRailCase, TestRailResult +from trcli.data_classes.data_parsers import MatchersParser + + +@pytest.fixture +def environment(): + """Create test environment""" + env = Environment() + env.project = "Test Project" + env.batch_size = 10 + return env + + +@pytest.fixture +def api_client(): + """Create test API client""" + return APIClient(host_name=TEST_RAIL_URL) + + +def create_test_suite_with_case_ids(num_cases=10): + """Helper to create test suite with specified number of cases with case IDs""" + test_cases = [] + for i in range(1, num_cases + 1): + test_case = TestRailCase( + case_id=i, + title=f"Test case {i}", + section_id=1, + result=TestRailResult(case_id=i, comment=f"Test result {i}", elapsed="1s", status_id=1), + ) + test_cases.append(test_case) + + section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=test_cases) + + return TestRailSuite(name="Test Suite", suite_id=1, testsections=[section]) + + +def create_test_suite_with_missing_case_ids(total_cases=10, missing_count=3): + """Helper to create test suite with some cases missing IDs""" + test_cases = [] + for i in range(1, total_cases + 1): + # First `missing_count` cases don't have case_id + case_id = None if i <= missing_count else i + test_case = TestRailCase( + case_id=case_id, + title=f"Test case {i}", + section_id=1, + result=TestRailResult(case_id=case_id, comment=f"Test result {i}", elapsed="1s", status_id=1), + ) + 
test_cases.append(test_case) + + section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=test_cases) + + return TestRailSuite(name="Test Suite", suite_id=1, testsections=[section]) + + +class TestNameMatcherOptimization: + """Test suite for NAME matcher performance optimizations""" + + @pytest.mark.api_handler + def test_name_matcher_skips_bulk_case_fetch(self, environment, api_client, mocker): + """ + Test that NAME matcher does NOT fetch all cases from TestRail. + This is the key optimization - we should skip the expensive get_all_cases call. + """ + # Setup: NAME matcher with 100 test cases + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=100) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock the get_all_cases method to track if it's called + mock_get_all_cases = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=([], None) + ) + + # Mock validation to return all IDs as valid (skip actual validation) + mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 101)) + ) + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: get_all_cases should NOT have been called for NAME matcher + mock_get_all_cases.assert_not_called() + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_auto_matcher_still_fetches_all_cases(self, environment, api_client, mocker): + """ + Test that AUTO matcher STILL fetches all cases (required for automation ID lookup). + This ensures we didn't break the AUTO matcher functionality. 
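+        (AUTO matching resolves report tests against each case's custom_automation_id, so the full case list has to be downloaded to build that lookup; skipping the fetch is only safe for the NAME/PROPERTY paths.)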
+ """ + # Setup: AUTO matcher + environment.case_matcher = MatchersParser.AUTO + test_suite = create_test_suite_with_case_ids(num_cases=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_all_cases to return some cases + mock_cases = [ + {"id": i, "custom_automation_id": f"test{i}", "title": f"Test {i}", "section_id": 1} for i in range(1, 11) + ] + mock_get_all_cases = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=(mock_cases, None) + ) + + mocker.patch.object(api_request_handler.data_provider, "update_data") + + # Execute + project_id = 1 + api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: get_all_cases SHOULD be called for AUTO matcher + mock_get_all_cases.assert_called_once_with(project_id, 1) + + @pytest.mark.api_handler + def test_name_matcher_skips_validation_for_large_batches(self, environment, api_client, mocker): + """ + Test that validation is SKIPPED when: + - Using NAME matcher + - All tests have case IDs (no missing) + - More than 1000 case IDs (large batch) + """ + # Setup: NAME matcher with 2000 test cases (> 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation method to track if it's called + mock_validate = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 2001)) + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation should be SKIPPED for large batches + mock_validate.assert_not_called() + + # Should log that validation was skipped + skip_log_calls = [call for call in mock_log.call_args_list if "Skipping validation" in str(call)] + assert len(skip_log_calls) > 0, "Should log that validation was skipped" + + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_validates_small_batches(self, environment, api_client, mocker): + """ + Test that validation RUNS when: + - Using NAME matcher + - Less than 1000 case IDs (small batch) + """ + # Setup: NAME matcher with 500 test cases (< 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=500) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation method to track if it's called + mock_validate = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 501)) + ) + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation SHOULD run for small batches + mock_validate.assert_called_once() + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_fetches_all_cases_for_large_report_with_missing_ids(self, environment, api_client, mocker): + """ + Test that for large reports with missing IDs, we FETCH ALL CASES instead of individual validation. 
+ This is the new optimized behavior: + - Using NAME matcher + - Large report (>=1000 total cases) + - Some tests are missing case IDs + + Strategy: Fetch all cases once (e.g., 660 calls for 165k cases) is more efficient than + individual validation (e.g., 1500 calls for 1500 cases in report). + """ + # Setup: 1500 total cases, 3 missing IDs (total >= 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_missing_case_ids(total_cases=1500, missing_count=3) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_all_cases to return all case IDs 4-1500 (cases 1-3 don't exist, matching missing IDs) + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i} for i in range(4, 1501)], None), + ) + + # Mock individual validation - should NOT be called for large reports + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(4, 1501)), + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should FETCH ALL CASES for large reports with missing IDs + mock_get_all_cases.assert_called_once_with(project_id, 1) + + # Should NOT use individual validation + mock_validate.assert_not_called() + + # Should log that it's using fetch-all strategy + fetch_log_calls = [call for call in mock_log.call_args_list if "Fetching all cases" in str(call)] + assert len(fetch_log_calls) > 0, "Should log that fetch-all strategy is being used" + + # Should log that missing cases were found + missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] + assert len(missing_log_calls) > 0, "Should log missing case IDs" + + assert missing_ids, "Should have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_validates_individually_for_small_report_with_missing_ids( + self, environment, api_client, mocker + ): + """ + Test that for small reports with missing IDs, we use INDIVIDUAL validation. + - Using NAME matcher + - Small report (<1000 total cases) + - Some tests are missing case IDs + + Strategy: Individual validation (e.g., 500 calls) is more efficient than + fetch all (e.g., 660 calls for 165k cases). 
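+        (The 660 figure matches the 250-results-per-page limit used in these mocks: 165,000 / 250 = 660 paginated get_cases calls, so a report with only ~500 IDs is cheaper to check one get_case call at a time.)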
+ """ + # Setup: 500 total cases, 10 missing IDs (total < 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_missing_case_ids(total_cases=500, missing_count=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock individual validation + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(11, 501)), # Exclude the 10 missing (1-10) + ) + + # Mock get_all_cases - should NOT be called for small reports + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([], None), + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should use INDIVIDUAL validation for small reports + mock_validate.assert_called_once() + + # Should NOT fetch all cases + mock_get_all_cases.assert_not_called() + + # Should log that missing cases were found + missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] + assert len(missing_log_calls) > 0, "Should log missing case IDs" + + assert missing_ids, "Should have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_detects_nonexistent_case_ids(self, environment, api_client, mocker): + """ + Test that NAME matcher correctly detects case IDs that don't exist in TestRail. + """ + # Setup: Test suite with case IDs 1-10 + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation: Only IDs 1-5 exist, 6-10 don't exist + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(1, 6)), # Only 1-5 exist + ) + + mock_elog = mocker.patch.object(environment, "elog") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should detect nonexistent IDs + mock_validate.assert_called_once() + mock_elog.assert_called_once() + + # Check error message contains nonexistent IDs + error_call = mock_elog.call_args[0][0] + assert "Nonexistent case IDs" in error_call + assert "6" in error_call or "7" in error_call # At least some of the missing IDs + + assert not missing_ids, "missing_ids refers to tests without IDs in report" + assert error != "", "Should have error about nonexistent IDs" + + +class TestValidateCaseIdsExist: + """Test the __validate_case_ids_exist helper method""" + + @pytest.mark.api_handler + def test_validate_empty_list(self, environment, api_client): + """Test that empty list returns empty set""" + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[]) + + assert result == set(), "Empty list should return empty set" + + @pytest.mark.api_handler + def test_validate_small_batch_sequential(self, environment, api_client, requests_mock): + """ + Test validation of small batch (<=50 cases) uses sequential validation. 
+ """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_case responses for IDs 1-10 + for i in range(1, 11): + requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"}) + + # Add one non-existent case (returns 404) + requests_mock.get(create_url("get_case/999"), status_code=404, json={"error": "Not found"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist( + suite_id=1, case_ids=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999] + ) + + # Should return 1-10 (11 total requested, 1 doesn't exist) + assert result == set(range(1, 11)), "Should validate existing cases" + assert 999 not in result, "Non-existent case should not be in result" + + @pytest.mark.api_handler + def test_validate_large_batch_concurrent(self, environment, api_client, requests_mock): + """ + Test validation of large batch (>50 cases) uses concurrent validation. + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock 100 case responses + for i in range(1, 101): + requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist( + suite_id=1, case_ids=list(range(1, 101)) + ) + + # Should validate all 100 cases concurrently + assert result == set(range(1, 101)), "Should validate all cases" + assert len(result) == 100 + + @pytest.mark.api_handler + def test_validate_filters_wrong_suite(self, environment, api_client, requests_mock): + """ + Test that validation filters out cases belonging to different suite. + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Case 1 belongs to suite 1 (correct) + requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"}) + + # Case 2 belongs to suite 2 (wrong suite) + requests_mock.get(create_url("get_case/2"), json={"id": 2, "suite_id": 2, "title": "Case 2"}) + + # Case 3 belongs to suite 1 (correct) + requests_mock.get(create_url("get_case/3"), json={"id": 3, "suite_id": 1, "title": "Case 3"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3]) + + # Should only return cases from suite 1 + assert result == {1, 3}, "Should filter out case from wrong suite" + assert 2 not in result, "Case from wrong suite should be excluded" + + @pytest.mark.api_handler + def test_validate_handles_api_errors(self, environment, api_client, requests_mock): + """ + Test that validation gracefully handles API errors (404, 500, etc). 
+ """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Case 1: Success + requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"}) + + # Case 2: 404 Not Found + requests_mock.get(create_url("get_case/2"), status_code=404, json={"error": "Not found"}) + + # Case 3: 500 Server Error + requests_mock.get(create_url("get_case/3"), status_code=500, json={"error": "Internal error"}) + + # Case 4: Success + requests_mock.get(create_url("get_case/4"), json={"id": 4, "suite_id": 1, "title": "Case 4"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3, 4]) + + # Should return only successful cases + assert result == {1, 4}, "Should only return successfully validated cases" + + +class TestPerformanceComparison: + """Tests demonstrating the performance improvement""" + + @pytest.mark.api_handler + def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker): + """ + Demonstrate that NAME matcher makes fewer API calls than AUTO matcher. + This is a documentation test showing the optimization benefit. + + Scenario: Large report with all case IDs present (best case for NAME matcher) + """ + # Test AUTO matcher (always fetches all cases) + environment.case_matcher = MatchersParser.AUTO + test_suite_auto = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler_auto = ApiRequestHandler(environment, api_client, test_suite_auto) + + mock_get_all_cases_auto = mocker.patch.object( + api_request_handler_auto, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i, "custom_automation_id": f"test{i}"} for i in range(1, 2001)], None), + ) + mocker.patch.object(api_request_handler_auto.data_provider, "update_data") + + api_request_handler_auto.check_missing_test_cases_ids(project_id=1) + + # AUTO matcher should call get_all_cases + assert mock_get_all_cases_auto.call_count == 1, "AUTO matcher fetches all cases" + + # Test NAME matcher with all IDs present (best case - skips validation) + env_name = Environment() + env_name.project = "Test Project" + env_name.batch_size = 10 + env_name.case_matcher = MatchersParser.NAME + + test_suite_name = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler_name = ApiRequestHandler(env_name, api_client, test_suite_name) + + mock_get_all_cases_name = mocker.patch.object( + api_request_handler_name, "_ApiRequestHandler__get_all_cases", return_value=([], None) + ) + + mock_validate_name = mocker.patch.object( + api_request_handler_name, "_ApiRequestHandler__validate_case_ids_exist", return_value=set() + ) + + mocker.patch.object(env_name, "log") + + api_request_handler_name.check_missing_test_cases_ids(project_id=1) + + # NAME matcher should NOT call get_all_cases when all IDs present and report >= 1000 + mock_get_all_cases_name.assert_not_called() + # Should also not call individual validation + mock_validate_name.assert_not_called() + + print("\n" + "=" * 60) + print("PERFORMANCE COMPARISON") + print("=" * 60) + print(f"AUTO matcher: {mock_get_all_cases_auto.call_count} get_all_cases calls") + print(f"NAME matcher: {mock_get_all_cases_name.call_count} get_all_cases calls") + print(f"Improvement: {mock_get_all_cases_auto.call_count - mock_get_all_cases_name.call_count} fewer calls") + print("=" * 60) + + @pytest.mark.api_handler + def test_performance_name_matcher_with_missing_ids(self, environment, api_client, mocker): + """ + 
Demonstrate smart strategy selection for NAME matcher with large reports containing missing IDs. + + Scenario: 5000 cases in report, 100 missing IDs + - Individual validation: 5000 API calls + - Fetch all + validate locally: ~660 API calls (for 165k cases in TestRail) + Strategy: Fetch all is more efficient + """ + env = Environment() + env.project = "Test Project" + env.batch_size = 10 + env.case_matcher = MatchersParser.NAME + + # 5000 cases, 100 missing IDs + test_suite = create_test_suite_with_missing_case_ids(total_cases=5000, missing_count=100) + api_request_handler = ApiRequestHandler(env, api_client, test_suite) + + # Mock get_all_cases to simulate fetching 165k cases + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i} for i in range(101, 5001)], None), # Cases 101-5000 exist + ) + + # Mock individual validation - should NOT be called + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(101, 5001)), + ) + + mocker.patch.object(env, "log") + + api_request_handler.check_missing_test_cases_ids(project_id=1) + + # Should use fetch-all strategy (more efficient for large reports) + mock_get_all_cases.assert_called_once() + mock_validate.assert_not_called() + + print("\n" + "=" * 60) + print("LARGE REPORT WITH MISSING IDS") + print("=" * 60) + print(f"Report size: 5000 cases, 100 missing IDs") + print(f"Strategy chosen: Fetch all cases") + print(f"API calls: 1 fetch (simulates ~660 paginated calls)") + print(f"Alternative: 4900 individual validation calls") + print(f"Efficiency: ~7.4x fewer calls") + print("=" * 60) + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "-s"]) diff --git a/tests/test_api_request_handler_labels.py b/tests/test_api_request_handler_labels.py index c447613..9a731c6 100644 --- a/tests/test_api_request_handler_labels.py +++ b/tests/test_api_request_handler_labels.py @@ -21,13 +21,12 @@ def labels_handler(): environment.batch_size = 10 environment.case_matcher = MatchersParser.AUTO - # Load test data json_path = Path(__file__).parent / "test_data/json/api_request_handler.json" with open(json_path) as file_json: json_string = json.dumps(json.load(file_json)) test_input = from_json(TestRailSuite, json_string) - + api_request = ApiRequestHandler(environment, api_client, test_input, verify=False) return api_request @@ -39,93 +38,68 @@ def test_add_label_success(self, labels_handler): """Test successful label addition""" # Mock the API client response mock_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test Label"}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): result, error = labels_handler.add_label(project_id=1, title="Test Label") - + assert error is None assert result["id"] == 1 assert result["title"] == "Test Label" - + # Verify the API call was made with correct parameters - labels_handler.client.send_post.assert_called_once_with( - "add_label/1", - payload=None, - files={'title': (None, "Test Label")} - ) + labels_handler.client.send_post.assert_called_once_with("add_label/1", payload={"title": "Test Label"}) def test_add_label_api_error(self, labels_handler): """Test label addition with API error""" - mock_response = 
APIClientResult( - status_code=400, - response_text=None, - error_message="Label title already exists" - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=400, response_text=None, error_message="Label title already exists") + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): result, error = labels_handler.add_label(project_id=1, title="Duplicate Label") - + assert error == "Label title already exists" assert result is None - def test_add_label_multipart_format(self, labels_handler): - """Test label addition uses multipart/form-data format""" + def test_add_label_json_format(self, labels_handler): + """Test label addition uses JSON format""" mock_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test Label"}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): result, error = labels_handler.add_label(project_id=1, title="Test Label") - + assert error is None - # Verify multipart/form-data format is used + # Verify JSON format is used call_args = labels_handler.client.send_post.call_args - assert call_args[1]['payload'] is None - assert call_args[1]['files'] == {'title': (None, "Test Label")} + assert call_args[1]["payload"] == {"title": "Test Label"} def test_update_label_success(self, labels_handler): """Test successful label update""" mock_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Updated Label"}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Updated Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): - result, error = labels_handler.update_label( - label_id=1, project_id=1, title="Updated Label" - ) - + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): + result, error = labels_handler.update_label(label_id=1, project_id=1, title="Updated Label") + assert error is None assert result["id"] == 1 assert result["title"] == "Updated Label" - + # Verify the API call was made with correct parameters labels_handler.client.send_post.assert_called_once_with( - "update_label/1", - payload=None, - files={'project_id': (None, '1'), 'title': (None, "Updated Label")} + "update_label/1", payload={"project_id": 1, "title": "Updated Label"} ) def test_update_label_api_error(self, labels_handler): """Test label update with API error""" - mock_response = APIClientResult( - status_code=403, - response_text=None, - error_message="No access to the project" - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): - result, error = labels_handler.update_label( - label_id=1, project_id=1, title="Updated Label" - ) - + mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project") + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): + result, error = labels_handler.update_label(label_id=1, project_id=1, title="Updated Label") + assert error == "No access to the project" assert result is None @@ -133,37 +107,28 @@ def test_get_label_success(self, labels_handler): """Test successful single label retrieval""" mock_response = APIClientResult( status_code=200, - 
response_text={ - "id": 1, - "title": "Test Label", - "created_by": "2", - "created_on": "1234567890" - }, - error_message=None + response_text={"id": 1, "title": "Test Label", "created_by": "2", "created_on": "1234567890"}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_label(label_id=1) - + assert error is None assert result["id"] == 1 assert result["title"] == "Test Label" assert result["created_by"] == "2" - + # Verify the API call was made with correct parameters labels_handler.client.send_get.assert_called_once_with("get_label/1") def test_get_label_not_found(self, labels_handler): """Test single label retrieval when label not found""" - mock_response = APIClientResult( - status_code=400, - response_text=None, - error_message="Label not found" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + mock_response = APIClientResult(status_code=400, response_text=None, error_message="Label not found") + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_label(label_id=999) - + assert error == "Label not found" assert result is None @@ -178,21 +143,21 @@ def test_get_labels_success(self, labels_handler): "_links": {"next": None, "prev": None}, "labels": [ {"id": 1, "title": "Label 1", "created_by": "2", "created_on": "1234567890"}, - {"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"} - ] + {"id": 2, "title": "Label 2", "created_by": "3", "created_on": "1234567891"}, + ], }, - error_message=None + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1) - + assert error is None assert result["size"] == 2 assert len(result["labels"]) == 2 assert result["labels"][0]["id"] == 1 assert result["labels"][1]["id"] == 2 - + # Verify the API call was made with correct parameters labels_handler.client.send_get.assert_called_once_with("get_labels/1") @@ -200,24 +165,18 @@ def test_get_labels_with_pagination(self, labels_handler): """Test labels listing with custom pagination parameters""" mock_response = APIClientResult( status_code=200, - response_text={ - "offset": 10, - "limit": 5, - "size": 0, - "_links": {"next": None, "prev": None}, - "labels": [] - }, - error_message=None + response_text={"offset": 10, "limit": 5, "size": 0, "_links": {"next": None, "prev": None}, "labels": []}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1, offset=10, limit=5) - + assert error is None assert result["offset"] == 10 assert result["limit"] == 5 assert len(result["labels"]) == 0 - + # Verify the API call was made with pagination parameters labels_handler.client.send_get.assert_called_once_with("get_labels/1&offset=10&limit=5") @@ -225,127 +184,90 @@ def test_get_labels_with_default_pagination(self, labels_handler): """Test labels listing with default pagination (should not add parameters)""" mock_response = APIClientResult( status_code=200, - response_text={ - "offset": 0, - "limit": 250, - "size": 1, - "labels": 
[{"id": 1, "title": "Label 1"}] - }, - error_message=None + response_text={"offset": 0, "limit": 250, "size": 1, "labels": [{"id": 1, "title": "Label 1"}]}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1, offset=0, limit=250) - + assert error is None # Should call without pagination parameters since they're defaults labels_handler.client.send_get.assert_called_once_with("get_labels/1") def test_get_labels_api_error(self, labels_handler): """Test labels listing with API error""" - mock_response = APIClientResult( - status_code=403, - response_text=None, - error_message="No access to the project" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_response): + mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project") + + with patch.object(labels_handler.client, "send_get", return_value=mock_response): result, error = labels_handler.get_labels(project_id=1) - + assert error == "No access to the project" assert result is None def test_delete_labels_success(self, labels_handler): """Test successful label deletion""" - mock_response = APIClientResult( - status_code=200, - response_text="Success", - error_message=None - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None) + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[1, 2, 3]) - + assert success is True assert error is None - + # Verify the API call was made with correct parameters - labels_handler.client.send_post.assert_called_once_with( - "delete_labels", - payload=None, - files={"label_ids": (None, "[1, 2, 3]")} - ) + labels_handler.client.send_post.assert_called_once_with("delete_labels", payload={"label_ids": [1, 2, 3]}) def test_delete_label_single_id(self, labels_handler): """Test single label deletion""" - mock_response = APIClientResult( - status_code=200, - response_text="Success", - error_message=None - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None) + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_label(label_id=1) - + assert success is True assert error is None - - labels_handler.client.send_post.assert_called_once_with( - "delete_label/1" - ) + + labels_handler.client.send_post.assert_called_once_with("delete_label/1") def test_delete_labels_batch(self, labels_handler): """Test batch label deletion with multiple IDs""" - mock_response = APIClientResult( - status_code=200, - response_text="Success", - error_message=None - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=200, response_text="Success", error_message=None) + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[1, 2, 3]) - + assert success is True assert error is None - - labels_handler.client.send_post.assert_called_once_with( - "delete_labels", - payload=None, - files={"label_ids": 
(None, "[1, 2, 3]")} - ) + + labels_handler.client.send_post.assert_called_once_with("delete_labels", payload={"label_ids": [1, 2, 3]}) def test_delete_labels_api_error(self, labels_handler): """Test label deletion with API error""" mock_response = APIClientResult( - status_code=400, - response_text=None, - error_message="One or more labels not found" + status_code=400, response_text=None, error_message="One or more labels not found" ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[999, 1000]) - + assert success is False assert error == "One or more labels not found" def test_delete_labels_forbidden(self, labels_handler): """Test label deletion with forbidden access""" - mock_response = APIClientResult( - status_code=403, - response_text=None, - error_message="No access to the project" - ) - - with patch.object(labels_handler.client, 'send_post', return_value=mock_response): + mock_response = APIClientResult(status_code=403, response_text=None, error_message="No access to the project") + + with patch.object(labels_handler.client, "send_post", return_value=mock_response): success, error = labels_handler.delete_labels(label_ids=[1]) - + assert success is False - assert error == "No access to the project" + assert error == "No access to the project" class TestApiRequestHandlerLabelsCases: """Test cases for test case label operations""" - + def setup_method(self): """Set up test fixtures""" # Create proper objects like the existing fixture @@ -353,355 +275,345 @@ def setup_method(self): environment = Environment() environment.project = "Test Project" environment.batch_size = 10 - + # Create a minimal TestRailSuite for testing from trcli.data_classes.dataclass_testrail import TestRailSuite + test_suite = TestRailSuite(name="Test Suite") - + self.labels_handler = ApiRequestHandler(environment, api_client, test_suite, verify=False) - + def test_add_labels_to_cases_success(self): """Test successful addition of labels to test cases""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler, 'add_label') as mock_add_label, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ - patch.object(self.labels_handler.client, 'send_post') as mock_send_post: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object( + self.labels_handler.label_manager, "add_label" + ) as mock_add_label, patch.object( + self.labels_handler.client, "send_get" + ) as mock_send_get, patch.object( + self.labels_handler.client, "send_post" + ) as mock_send_post: + # Mock __get_all_cases response (cases exist) - mock_get_cases.return_value = ([ - {"id": 1, "title": "Case 1", "suite_id": 1}, - {"id": 2, "title": "Case 2", "suite_id": 1} - ], "") - + mock_get_cases.return_value = ( + [{"id": 1, "title": "Case 1", "suite_id": 1}, {"id": 2, "title": "Case 2", "suite_id": 1}], + "", + ) + # Mock get_labels response (label doesn't exist) mock_get_labels.return_value = ({"labels": []}, "") - + # Mock add_label response (create new label) mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") - + # Mock get_case responses 
mock_send_get.side_effect = [ - MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}), # Case 1 - MagicMock(status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}) # Case 2 + MagicMock( + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}, error_message="" + ), # Case 1 + MagicMock( + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 2"}, error_message="" + ), # Case 2 ] - + # Mock update_cases batch response (for multiple cases) - mock_send_post.return_value = MagicMock(status_code=200) - + mock_send_post.return_value = MagicMock(status_code=200, error_message="") + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1, 2], - title="test-label", - project_id=1 + case_ids=[1, 2], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 2 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 0 - assert len(results['case_not_found']) == 0 - + assert len(results["successful_cases"]) == 2 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 0 + assert len(results["case_not_found"]) == 0 + # Verify API calls - should be called twice: once for multi-suite detection, once for case validation assert mock_get_cases.call_count == 2 - mock_get_cases.assert_has_calls([ - call(1, None), # Multi-suite detection - call(1, None) # Case validation - ]) + mock_get_cases.assert_has_calls([call(1, None), call(1, None)]) # Multi-suite detection # Case validation mock_get_labels.assert_called_once_with(1) mock_add_label.assert_called_once_with(1, "test-label") assert mock_send_get.call_count == 2 # Should call update_cases/{suite_id} once for batch update - mock_send_post.assert_called_once_with("update_cases/1", payload={ - 'case_ids': [1, 2], - 'labels': [5] - }) - + mock_send_post.assert_called_once_with("update_cases/1", payload={"case_ids": [1, 2], "labels": [5]}) + def test_add_labels_to_cases_single_case(self): """Test adding labels to a single test case using update_case endpoint""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler, 'add_label') as mock_add_label, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ - patch.object(self.labels_handler.client, 'send_post') as mock_send_post: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object( + self.labels_handler.label_manager, "add_label" + ) as mock_add_label, patch.object( + self.labels_handler.client, "send_get" + ) as mock_send_get, patch.object( + self.labels_handler.client, "send_post" + ) as mock_send_post: + # Mock __get_all_cases response (case exists) - mock_get_cases.return_value = ([ - {"id": 1, "title": "Case 1"} - ], "") - + mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") + # Mock get_labels response (label doesn't exist) mock_get_labels.return_value = ({"labels": []}, "") - + # Mock add_label response (create new label) mock_add_label.return_value = ({"label": {"id": 5, "title": "test-label"}}, "") - + # Mock get_case response mock_send_get.return_value = MagicMock( - 
status_code=200, - response_text={"labels": [], "suite_id": 1, "title": "Case 1"} + status_code=200, response_text={"labels": [], "suite_id": 1, "title": "Case 1"}, error_message="" ) - + # Mock update_case response (for single case) - mock_send_post.return_value = MagicMock(status_code=200) - + mock_send_post.return_value = MagicMock(status_code=200, error_message="") + # Test the method with single case results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 1 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 0 - assert len(results['case_not_found']) == 0 - + assert len(results["successful_cases"]) == 1 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 0 + assert len(results["case_not_found"]) == 0 + # Verify API calls assert mock_get_cases.call_count == 2 mock_get_labels.assert_called_once_with(1) mock_add_label.assert_called_once_with(1, "test-label") assert mock_send_get.call_count == 1 # Should call update_case/{case_id} once for single case - mock_send_post.assert_called_once_with("update_case/1", payload={'labels': [5]}) + mock_send_post.assert_called_once_with("update_case/1", payload={"labels": [5]}) def test_add_labels_to_cases_existing_label(self): """Test adding labels when label already exists""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler, 'add_label') as mock_add_label, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get, \ - patch.object(self.labels_handler.client, 'send_post') as mock_send_post: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object( + self.labels_handler.label_manager, "add_label" + ) as mock_add_label, patch.object( + self.labels_handler.client, "send_get" + ) as mock_send_get, patch.object( + self.labels_handler.client, "send_post" + ) as mock_send_post: + # Mock __get_all_cases response (case exists) mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") - + # Mock get_labels response (label exists) mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") - + # Mock get_case response - mock_send_get.return_value = MagicMock(status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}) - + mock_send_get.return_value = MagicMock( + status_code=200, response_text={"labels": [], "section_id": 1, "title": "Case 1"}, error_message="" + ) + # Mock add_label_to_case response - mock_send_post.return_value = MagicMock(status_code=200) - + mock_send_post.return_value = MagicMock(status_code=200, error_message="") + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 1 - assert len(results['case_not_found']) == 0 - + assert len(results["successful_cases"]) == 1 + assert len(results["case_not_found"]) == 0 + # Verify add_label was 
not called (label already exists) mock_add_label.assert_not_called() - + def test_add_labels_to_cases_max_labels_reached(self): """Test handling of maximum labels limit (10)""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get: + # Mock __get_all_cases response (case exists) mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") - + # Mock get_labels response mock_get_labels.return_value = ({"labels": [{"id": 15, "title": "test-label"}]}, "") - + # Mock get_case response with 10 existing labels (different from test-label) existing_labels = [{"id": i, "title": f"label-{i}"} for i in range(1, 11)] mock_send_get.return_value = MagicMock( - status_code=200, - response_text={"labels": existing_labels} + status_code=200, response_text={"labels": existing_labels}, error_message="" ) - + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 0 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 1 - assert len(results['case_not_found']) == 0 - assert results['max_labels_reached'][0] == 1 - + assert len(results["successful_cases"]) == 0 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 1 + assert len(results["case_not_found"]) == 0 + assert results["max_labels_reached"][0] == 1 + def test_add_labels_to_cases_label_already_on_case(self): """Test handling when label already exists on case""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels, \ - patch.object(self.labels_handler.client, 'send_get') as mock_send_get: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels, patch.object(self.labels_handler.client, "send_get") as mock_send_get: + # Mock __get_all_cases response (case exists) mock_get_cases.return_value = ([{"id": 1, "title": "Case 1"}], "") - + # Mock get_labels response mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") - + # Mock get_case response with the label already present mock_send_get.return_value = MagicMock( - status_code=200, - response_text={"labels": [{"id": 5, "title": "test-label"}]} + status_code=200, response_text={"labels": [{"id": 5, "title": "test-label"}]}, error_message="" ) - + # Test the method results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[1], - title="test-label", - project_id=1 + case_ids=[1], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - assert len(results['successful_cases']) == 1 - assert len(results['case_not_found']) == 0 - assert "already exists" in results['successful_cases'][0]['message'] + assert 
len(results["successful_cases"]) == 1 + assert len(results["case_not_found"]) == 0 + assert "already exists" in results["successful_cases"][0]["message"] def test_add_labels_to_cases_case_not_found(self): """Test handling when case IDs don't exist""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases: + # Mock __get_all_cases response (no cases exist) mock_get_cases.return_value = ([], "") - + # Test the method with case IDs that don't exist results, error_message = self.labels_handler.add_labels_to_cases( - case_ids=[999, 1000, 1001], - title="test-label", - project_id=1 + case_ids=[999, 1000, 1001], title="test-label", project_id=1 ) - + # Verify no error assert error_message == "" - + # Verify results - all cases should be in case_not_found - assert len(results['case_not_found']) == 3 - assert 999 in results['case_not_found'] - assert 1000 in results['case_not_found'] - assert 1001 in results['case_not_found'] - + assert len(results["case_not_found"]) == 3 + assert 999 in results["case_not_found"] + assert 1000 in results["case_not_found"] + assert 1001 in results["case_not_found"] + # Verify that no other processing happened since no valid cases - assert len(results['successful_cases']) == 0 - assert len(results['failed_cases']) == 0 - assert len(results['max_labels_reached']) == 0 - + assert len(results["successful_cases"]) == 0 + assert len(results["failed_cases"]) == 0 + assert len(results["max_labels_reached"]) == 0 + def test_get_cases_by_label_with_label_ids(self): """Test getting cases by label IDs""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases: + # Mock cases response mock_cases = [ {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "label1"}]}, {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "label2"}]}, - {"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]} + {"id": 3, "title": "Test Case 3", "labels": [{"id": 5, "title": "label1"}]}, ] mock_get_cases.return_value = (mock_cases, "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_ids=[5] + project_id=1, suite_id=None, label_ids=[5] ) - + # Verify no error assert error_message == "" - + # Verify results (should return cases 1 and 3) assert len(matching_cases) == 2 - assert matching_cases[0]['id'] == 1 - assert matching_cases[1]['id'] == 3 - + assert matching_cases[0]["id"] == 1 + assert matching_cases[1]["id"] == 3 + def test_get_cases_by_label_with_title(self): """Test getting cases by label title""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels: + # Mock labels response mock_get_labels.return_value = ({"labels": [{"id": 5, "title": "test-label"}]}, "") - + # Mock cases response mock_cases = [ {"id": 1, "title": "Test Case 1", "labels": [{"id": 5, "title": "test-label"}]}, - {"id": 2, "title": "Test Case 2", "labels": [{"id": 6, "title": "other-label"}]} + {"id": 2, "title": "Test Case 2", 
"labels": [{"id": 6, "title": "other-label"}]}, ] mock_get_cases.return_value = (mock_cases, "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_title="test-label" + project_id=1, suite_id=None, label_title="test-label" ) - + # Verify no error assert error_message == "" - + # Verify results (should return case 1) assert len(matching_cases) == 1 - assert matching_cases[0]['id'] == 1 - + assert matching_cases[0]["id"] == 1 + def test_get_cases_by_label_title_not_found(self): """Test getting cases by non-existent label title""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases, \ - patch.object(self.labels_handler, 'get_labels') as mock_get_labels: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases, patch.object( + self.labels_handler.label_manager, "get_labels" + ) as mock_get_labels: + # Mock labels response (no matching label) mock_get_labels.return_value = ({"labels": []}, "") - + # Mock get_all_cases to return empty (not called due to early return) mock_get_cases.return_value = ([], "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_title="non-existent-label" + project_id=1, suite_id=None, label_title="non-existent-label" ) - + # Verify error assert error_message == "" assert matching_cases == [] - + def test_get_cases_by_label_no_matching_cases(self): """Test getting cases when no cases have the specified label""" - with patch.object(self.labels_handler, '_ApiRequestHandler__get_all_cases') as mock_get_cases: - + with patch.object(self.labels_handler, "_ApiRequestHandler__get_all_cases") as mock_get_cases: + # Mock cases response (no cases with target label) mock_cases = [ {"id": 1, "title": "Test Case 1", "labels": [{"id": 6, "title": "other-label"}]}, - {"id": 2, "title": "Test Case 2", "labels": []} + {"id": 2, "title": "Test Case 2", "labels": []}, ] mock_get_cases.return_value = (mock_cases, "") - + # Test the method matching_cases, error_message = self.labels_handler.get_cases_by_label( - project_id=1, - suite_id=None, - label_ids=[5] + project_id=1, suite_id=None, label_ids=[5] ) - + # Verify no error but no results assert error_message == "" assert len(matching_cases) == 0 @@ -714,362 +626,279 @@ def test_add_labels_to_tests_success_single(self, labels_handler): """Test successful label addition to a single test""" # Mock test validation mock_test_response = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, error_message=None ) - + # Mock run validation mock_run_response = APIClientResult( - status_code=200, - response_text={"id": 1, "project_id": 1}, - error_message=None + status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None ) - + # Mock existing labels - mock_labels_response = APIClientResult( - status_code=200, - response_text={"labels": []}, - error_message=None - ) - + mock_labels_response = APIClientResult(status_code=200, response_text={"labels": []}, error_message=None) + # Mock label creation mock_add_label_response = APIClientResult( - status_code=200, - response_text={"id": 5, "title": "Test Label"}, - error_message=None + status_code=200, response_text={"id": 5, "title": "Test Label"}, error_message=None ) - + # 
Mock test update mock_update_response = APIClientResult( - status_code=200, - response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, - error_message=None + status_code=200, response_text={"id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, error_message=None ) - - with patch.object(labels_handler.client, 'send_get') as mock_get, \ - patch.object(labels_handler.client, 'send_post') as mock_post: - + + with patch.object(labels_handler.client, "send_get") as mock_get, patch.object( + labels_handler.client, "send_post" + ) as mock_post: + # Setup get responses for validation and label retrieval mock_get.side_effect = [ mock_test_response, # get_test/{test_id} - mock_run_response, # get_run/{run_id} - mock_labels_response, # get_labels + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels mock_test_response, # get_test/{test_id} again for labels check ] - + # Setup post responses for label creation and test update - mock_post.side_effect = [ - mock_add_label_response, # add_label - mock_update_response # update_test - ] - - result, error = labels_handler.add_labels_to_tests( - test_ids=[1], - titles="Test Label", - project_id=1 - ) - + mock_post.side_effect = [mock_add_label_response, mock_update_response] # add_label # update_test + + result, error = labels_handler.add_labels_to_tests(test_ids=[1], titles="Test Label", project_id=1) + assert error == "" - assert len(result['successful_tests']) == 1 - assert len(result['failed_tests']) == 0 - assert len(result['test_not_found']) == 0 - assert len(result['max_labels_reached']) == 0 + assert len(result["successful_tests"]) == 1 + assert len(result["failed_tests"]) == 0 + assert len(result["test_not_found"]) == 0 + assert len(result["max_labels_reached"]) == 0 def test_add_labels_to_tests_test_not_found(self, labels_handler): """Test handling of non-existent test IDs""" # Mock test not found - mock_test_response = APIClientResult( - status_code=404, - response_text=None, - error_message="Test not found" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): - result, error = labels_handler.add_labels_to_tests( - test_ids=[999], - titles="Test Label", - project_id=1 - ) - + mock_test_response = APIClientResult(status_code=404, response_text=None, error_message="Test not found") + + with patch.object(labels_handler.client, "send_get", return_value=mock_test_response): + result, error = labels_handler.add_labels_to_tests(test_ids=[999], titles="Test Label", project_id=1) + assert error == "" - assert len(result['test_not_found']) == 1 - assert 999 in result['test_not_found'] + assert len(result["test_not_found"]) == 1 + assert 999 in result["test_not_found"] def test_add_labels_to_tests_max_labels_reached(self, labels_handler): """Test handling of tests that already have maximum labels""" # Create 10 existing labels existing_labels = [{"id": i, "title": f"Label {i}"} for i in range(1, 11)] - + # Mock test with max labels mock_test_response = APIClientResult( status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": existing_labels}, - error_message=None + error_message=None, ) - + # Mock run validation mock_run_response = APIClientResult( - status_code=200, - response_text={"id": 1, "project_id": 1}, - error_message=None + status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None ) - + # Mock existing labels - mock_labels_response = APIClientResult( - status_code=200, - response_text={"labels": []}, - error_message=None - ) 
- + mock_labels_response = APIClientResult(status_code=200, response_text={"labels": []}, error_message=None) + # Mock label creation mock_add_label_response = APIClientResult( - status_code=200, - response_text={"id": 11, "title": "New Label"}, - error_message=None + status_code=200, response_text={"id": 11, "title": "New Label"}, error_message=None ) - - with patch.object(labels_handler.client, 'send_get') as mock_get, \ - patch.object(labels_handler.client, 'send_post') as mock_post: - + + with patch.object(labels_handler.client, "send_get") as mock_get, patch.object( + labels_handler.client, "send_post" + ) as mock_post: + mock_get.side_effect = [ - mock_test_response, # get_test/{test_id} - mock_run_response, # get_run/{run_id} - mock_labels_response, # get_labels - mock_test_response, # get_test/{test_id} again for labels check + mock_test_response, # get_test/{test_id} + mock_run_response, # get_run/{run_id} + mock_labels_response, # get_labels + mock_test_response, # get_test/{test_id} again for labels check ] - + mock_post.return_value = mock_add_label_response - - result, error = labels_handler.add_labels_to_tests( - test_ids=[1], - titles="New Label", - project_id=1 - ) - + + result, error = labels_handler.add_labels_to_tests(test_ids=[1], titles="New Label", project_id=1) + assert error == "" - assert len(result['max_labels_reached']) == 1 - assert 1 in result['max_labels_reached'] + assert len(result["max_labels_reached"]) == 1 + assert 1 in result["max_labels_reached"] def test_get_tests_by_label_success(self, labels_handler): """Test successful retrieval of tests by label""" # Mock runs response mock_runs_response = APIClientResult( - status_code=200, - response_text={"runs": [{"id": 1}, {"id": 2}]}, - error_message=None + status_code=200, response_text={"runs": [{"id": 1}, {"id": 2}]}, error_message=None ) - + # Mock tests responses for each run mock_tests_response_run1 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}, - {"id": 2, "title": "Test 2", "labels": []} - ]}, - error_message=None + response_text={ + "tests": [ + {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}, + {"id": 2, "title": "Test 2", "labels": []}, + ] + }, + error_message=None, ) - + mock_tests_response_run2 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]} - ]}, - error_message=None + response_text={"tests": [{"id": 3, "title": "Test 3", "labels": [{"id": 5, "title": "Test Label"}]}]}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get') as mock_get: + + with patch.object(labels_handler.client, "send_get") as mock_get: mock_get.side_effect = [ - mock_runs_response, # get_runs/{project_id} + mock_runs_response, # get_runs/{project_id} mock_tests_response_run1, # get_tests/{run_id} for run 1 - mock_tests_response_run2 # get_tests/{run_id} for run 2 + mock_tests_response_run2, # get_tests/{run_id} for run 2 ] - - result, error = labels_handler.get_tests_by_label( - project_id=1, - label_ids=[5] - ) - + + result, error = labels_handler.get_tests_by_label(project_id=1, label_ids=[5]) + assert error == "" assert len(result) == 2 - assert result[0]['id'] == 1 - assert result[1]['id'] == 3 + assert result[0]["id"] == 1 + assert result[1]["id"] == 3 def test_get_tests_by_label_with_run_ids(self, labels_handler): """Test retrieval of tests by label filtered by specific run IDs""" # 
Mock run responses for specific run IDs mock_run_response_1 = APIClientResult( - status_code=200, - response_text={"id": 1, "name": "Test Run 1"}, - error_message=None + status_code=200, response_text={"id": 1, "name": "Test Run 1"}, error_message=None ) - + mock_run_response_2 = APIClientResult( - status_code=200, - response_text={"id": 2, "name": "Test Run 2"}, - error_message=None + status_code=200, response_text={"id": 2, "name": "Test Run 2"}, error_message=None ) - + # Mock tests responses for each run mock_tests_response_run1 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]} - ]}, - error_message=None + response_text={"tests": [{"id": 1, "title": "Test 1", "labels": [{"id": 5, "title": "Test Label"}]}]}, + error_message=None, ) - + mock_tests_response_run2 = APIClientResult( status_code=200, - response_text={"tests": [ - {"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]} - ]}, - error_message=None + response_text={"tests": [{"id": 2, "title": "Test 2", "labels": [{"id": 5, "title": "Test Label"}]}]}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get') as mock_get: + + with patch.object(labels_handler.client, "send_get") as mock_get: mock_get.side_effect = [ - mock_run_response_1, # get_run/1 - mock_run_response_2, # get_run/2 + mock_run_response_1, # get_run/1 + mock_run_response_2, # get_run/2 mock_tests_response_run1, # get_tests/1 - mock_tests_response_run2 # get_tests/2 + mock_tests_response_run2, # get_tests/2 ] - - result, error = labels_handler.get_tests_by_label( - project_id=1, - label_ids=[5], - run_ids=[1, 2] - ) - + + result, error = labels_handler.get_tests_by_label(project_id=1, label_ids=[5], run_ids=[1, 2]) + assert error == "" assert len(result) == 2 - assert result[0]['id'] == 1 - assert result[1]['id'] == 2 + assert result[0]["id"] == 1 + assert result[1]["id"] == 2 def test_get_test_labels_success(self, labels_handler): """Test successful retrieval of test labels""" # Mock test responses mock_test_response1 = APIClientResult( status_code=200, - response_text={ - "id": 1, - "title": "Test 1", - "status_id": 1, - "labels": [{"id": 5, "title": "Test Label"}] - }, - error_message=None + response_text={"id": 1, "title": "Test 1", "status_id": 1, "labels": [{"id": 5, "title": "Test Label"}]}, + error_message=None, ) - + mock_test_response2 = APIClientResult( status_code=200, - response_text={ - "id": 2, - "title": "Test 2", - "status_id": 2, - "labels": [] - }, - error_message=None + response_text={"id": 2, "title": "Test 2", "status_id": 2, "labels": []}, + error_message=None, ) - - with patch.object(labels_handler.client, 'send_get') as mock_get: + + with patch.object(labels_handler.client, "send_get") as mock_get: mock_get.side_effect = [mock_test_response1, mock_test_response2] - + result, error = labels_handler.get_test_labels([1, 2]) - + assert error == "" assert len(result) == 2 - + # Check first test - assert result[0]['test_id'] == 1 - assert result[0]['title'] == "Test 1" - assert result[0]['status_id'] == 1 - assert len(result[0]['labels']) == 1 - assert result[0]['labels'][0]['title'] == "Test Label" - assert result[0]['error'] is None - + assert result[0]["test_id"] == 1 + assert result[0]["title"] == "Test 1" + assert result[0]["status_id"] == 1 + assert len(result[0]["labels"]) == 1 + assert result[0]["labels"][0]["title"] == "Test Label" + assert result[0]["error"] is None + # Check second test - assert 
result[1]['test_id'] == 2 - assert result[1]['title'] == "Test 2" - assert result[1]['status_id'] == 2 - assert len(result[1]['labels']) == 0 - assert result[1]['error'] is None + assert result[1]["test_id"] == 2 + assert result[1]["title"] == "Test 2" + assert result[1]["status_id"] == 2 + assert len(result[1]["labels"]) == 0 + assert result[1]["error"] is None def test_get_test_labels_test_not_found(self, labels_handler): """Test handling of non-existent test IDs in get_test_labels""" # Mock test not found - mock_test_response = APIClientResult( - status_code=404, - response_text=None, - error_message="Test not found" - ) - - with patch.object(labels_handler.client, 'send_get', return_value=mock_test_response): + mock_test_response = APIClientResult(status_code=404, response_text=None, error_message="Test not found") + + with patch.object(labels_handler.client, "send_get", return_value=mock_test_response): result, error = labels_handler.get_test_labels([999]) - + assert error == "" assert len(result) == 1 - assert result[0]['test_id'] == 999 - assert result[0]['error'] == "Test 999 not found or inaccessible" - assert result[0]['labels'] == [] + assert result[0]["test_id"] == 999 + assert result[0]["error"] == "Test 999 not found or inaccessible" + assert result[0]["labels"] == [] def test_add_labels_to_tests_batch_update(self, labels_handler): """Test batch update of multiple tests""" # Mock test validation for multiple tests mock_test_response1 = APIClientResult( - status_code=200, - response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, - error_message=None + status_code=200, response_text={"id": 1, "title": "Test 1", "run_id": 1, "labels": []}, error_message=None ) - + mock_test_response2 = APIClientResult( - status_code=200, - response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, - error_message=None + status_code=200, response_text={"id": 2, "title": "Test 2", "run_id": 1, "labels": []}, error_message=None ) - + # Mock run validation mock_run_response = APIClientResult( - status_code=200, - response_text={"id": 1, "project_id": 1}, - error_message=None + status_code=200, response_text={"id": 1, "project_id": 1}, error_message=None ) - + # Mock existing labels mock_labels_response = APIClientResult( - status_code=200, - response_text={"labels": [{"id": 5, "title": "Test Label"}]}, - error_message=None + status_code=200, response_text={"labels": [{"id": 5, "title": "Test Label"}]}, error_message=None ) - + # Mock batch update - mock_batch_response = APIClientResult( - status_code=200, - response_text={"updated": 2}, - error_message=None - ) - - with patch.object(labels_handler.client, 'send_get') as mock_get, \ - patch.object(labels_handler.client, 'send_post') as mock_post: - + mock_batch_response = APIClientResult(status_code=200, response_text={"updated": 2}, error_message=None) + + with patch.object(labels_handler.client, "send_get") as mock_get, patch.object( + labels_handler.client, "send_post" + ) as mock_post: + # Setup get responses mock_get.side_effect = [ mock_test_response1, # get_test/1 - mock_run_response, # get_run/1 + mock_run_response, # get_run/1 mock_test_response2, # get_test/2 - mock_run_response, # get_run/1 - mock_labels_response, # get_labels + mock_run_response, # get_run/1 + mock_labels_response, # get_labels mock_test_response1, # get_test/1 for labels check mock_test_response2, # get_test/2 for labels check ] - + # Setup batch update response mock_post.return_value = mock_batch_response - - result, error = 
labels_handler.add_labels_to_tests( - test_ids=[1, 2], - titles="Test Label", - project_id=1 - ) - + + result, error = labels_handler.add_labels_to_tests(test_ids=[1, 2], titles="Test Label", project_id=1) + assert error == "" - assert len(result['successful_tests']) == 2 \ No newline at end of file + assert len(result["successful_tests"]) == 2 diff --git a/tests/test_cmd_export_gherkin.py b/tests/test_cmd_export_gherkin.py new file mode 100644 index 0000000..e5ed788 --- /dev/null +++ b/tests/test_cmd_export_gherkin.py @@ -0,0 +1,257 @@ +import pytest +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_export_gherkin + + +class TestCmdExportGherkin: + """Test class for export_gherkin command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.sample_feature_content = """@smoke +Feature: User Login + As a user + I want to log in + + Scenario: Successful login + Given I am on the login page + When I enter valid credentials + Then I should see the dashboard +""" + + # Set up environment with required parameters + self.environment = Environment(cmd="export_gherkin") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_success_to_file(self, mock_api_client_class, mock_api_handler_class): + """Test successful export to file""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + with self.runner.isolated_filesystem(): + result = self.runner.invoke( + cmd_export_gherkin.cli, ["--case-id", "456", "--output", "exported.feature"], obj=self.environment + ) + + assert result.exit_code == 0 + assert "successfully exported" in result.output.lower() + assert "exported.feature" in result.output + + # Verify file was created with correct content + with open("exported.feature", "r") as f: + content = f.read() + assert "Feature: User Login" in content + assert "@smoke" in content + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_success_to_stdout(self, mock_api_client_class, mock_api_handler_class): + """Test successful export to stdout""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"], obj=self.environment) + + assert result.exit_code == 0 + # Content should be printed to stdout + assert "Feature: User Login" in result.output + 
assert "@smoke" in result.output + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_verbose_logging(self, mock_api_client_class, mock_api_handler_class): + """Test export with verbose logging""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + # Enable verbose mode via environment (verbose is now a global option) + self.environment.verbose = True + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"], obj=self.environment) + + assert result.exit_code == 0 + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_api_error_case_not_found(self, mock_api_client_class, mock_api_handler_class): + """Test API error when case not found""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with error + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = ("", "Failed to retrieve BDD test case (HTTP 404)") + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "99999"], obj=self.environment) + + assert result.exit_code == 1 + assert "error" in result.output.lower() + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_empty_content(self, mock_api_client_class, mock_api_handler_class): + """Test when no BDD content is returned""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with empty content + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = ("", "") # Empty content, no error + + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"], obj=self.environment) + + assert result.exit_code == 1 + assert "no bdd content found" in result.output.lower() + + @pytest.mark.cmd_export_gherkin + def test_export_gherkin_required_parameters(self): + """Test that required parameters are validated""" + # Missing --case-id + result = self.runner.invoke(cmd_export_gherkin.cli, ["--project-id", "1"]) + assert result.exit_code == 2 # Click error for missing required option + + # Missing --project-id (handled by check_for_required_parameters) + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "456"]) + # Will fail due to missing required params + + @pytest.mark.cmd_export_gherkin + def test_export_gherkin_invalid_case_id(self): + """Test with invalid case ID (negative or zero)""" + result = self.runner.invoke(cmd_export_gherkin.cli, ["--case-id", "-1"], obj=self.environment) + + # Click IntRange validation should catch this + assert result.exit_code == 2 + + @pytest.mark.cmd_export_gherkin + 
@patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + @patch("builtins.open", side_effect=PermissionError("Permission denied")) + def test_export_gherkin_permission_error(self, mock_open, mock_api_client_class, mock_api_handler_class): + """Test file write permission error""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + result = self.runner.invoke( + cmd_export_gherkin.cli, + ["--case-id", "456", "--output", "/root/no_permission.feature"], + obj=self.environment, + ) + + assert result.exit_code == 1 + # Check for various error messages related to file writing + assert ( + "permission denied" in result.output.lower() + or "read-only file system" in result.output.lower() + or "error writing file" in result.output.lower() + ) + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_create_nested_directory(self, mock_api_client_class, mock_api_handler_class): + """Test that parent directories are created if they don't exist""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (self.sample_feature_content, "") + + with self.runner.isolated_filesystem(): + output_path = "nested/dir/exported.feature" + result = self.runner.invoke( + cmd_export_gherkin.cli, ["--case-id", "456", "--output", output_path], obj=self.environment + ) + + assert result.exit_code == 0 + # Verify nested directory was created + assert Path(output_path).exists() + assert Path(output_path).is_file() + + @pytest.mark.cmd_export_gherkin + @patch("trcli.commands.cmd_export_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_export_gherkin.APIClient") + def test_export_gherkin_unicode_content(self, mock_api_client_class, mock_api_handler_class): + """Test export with unicode characters""" + unicode_content = """@test +Feature: Tëst with ūnīcödé 测试 + Scenario: Test scenario + Given test step with émojis 🎉 +""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.get_bdd.return_value = (unicode_content, "") + + with self.runner.isolated_filesystem(): + result = self.runner.invoke( + cmd_export_gherkin.cli, ["--case-id", "456", "--output", "unicode.feature"], obj=self.environment + ) + + assert result.exit_code == 0 + + # Verify unicode content is preserved + with open("unicode.feature", "r", encoding="utf-8") as f: + content = f.read() + assert "ūnīcödé" in content + assert "测试" in content + assert "🎉" in content diff --git a/tests/test_cmd_import_gherkin.py b/tests/test_cmd_import_gherkin.py new file mode 100644 index 0000000..16c95cc --- /dev/null +++ b/tests/test_cmd_import_gherkin.py @@ -0,0 +1,258 @@ +import 
pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_import_gherkin + + +class TestCmdImportGherkin: + """Test class for import_gherkin command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.test_feature_path = str(Path(__file__).parent / "test_data" / "FEATURE" / "sample_bdd.feature") + + # Set up environment with required parameters + self.environment = Environment(cmd="import_gherkin") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_success(self, mock_api_client_class, mock_api_handler_class): + """Test successful feature file upload""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([456], "") # Success: case ID 456, no error + + with self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test scenario\n Given test step\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 0 + assert "successfully uploaded" in result.output.lower() + assert "456" in result.output + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_json_output(self, mock_api_client_class, mock_api_handler_class): + """Test feature file upload with JSON output""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([101, 102], "") # Success: 2 case IDs + + with self.runner.isolated_filesystem(): + # Create test feature file + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test 1\n Scenario: Test 2\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--section-id", "123", "--json-output"], + obj=self.environment, + ) + + assert result.exit_code == 0 + # Output contains logging messages + JSON, extract JSON (starts with '{') + json_start = result.output.find("{") + assert json_start >= 0, "No JSON found in output" + json_str = result.output[json_start:] + output_data = json.loads(json_str) + assert "case_ids" in output_data + assert output_data["case_ids"] == [101, 102] + assert output_data["count"] == 2 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_verbose_logging(self, 
mock_api_client_class, mock_api_handler_class): + """Test feature file upload with verbose logging""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([456], "") + + # Enable verbose mode via environment (verbose is now a global option) + self.environment.verbose = True + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--section-id", "123"], + obj=self.environment, + ) + + assert result.exit_code == 0 + # Verbose output should show API endpoint + # (verbose logs might not appear in captured output but command should succeed) + + @pytest.mark.cmd_import_gherkin + def test_import_gherkin_missing_file(self): + """Test with non-existent file""" + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "/nonexistent/file.feature", "--section-id", "123"], obj=self.environment + ) + + # Click returns exit code 2 for invalid parameter (file doesn't exist) + assert result.exit_code in [1, 2] + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_empty_file(self, mock_api_client_class, mock_api_handler_class): + """Test with empty feature file""" + with self.runner.isolated_filesystem(): + # Create empty file + with open("empty.feature", "w") as f: + f.write("") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "empty.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 1 + assert "empty" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_api_error(self, mock_api_client_class, mock_api_handler_class): + """Test API error handling""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with error + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([], "API Error: Section not found") + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "999"], obj=self.environment + ) + + assert result.exit_code == 1 + assert "error" in result.output.lower() + assert "section not found" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_no_cases_created(self, mock_api_client_class, mock_api_handler_class): + """Test when no case IDs are returned from API""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler with empty case IDs + mock_handler = 
MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([], "") # No error, but no cases created + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 0 # Not an error, just a warning + assert "warning" in result.output.lower() + assert "no case" in result.output.lower() + + @pytest.mark.cmd_import_gherkin + def test_import_gherkin_required_parameters(self): + """Test that required parameters are validated""" + # Missing --file + result = self.runner.invoke(cmd_import_gherkin.cli, ["--section-id", "123", "--project-id", "1"]) + assert result.exit_code == 2 # Click error for missing required option + + # Missing --section-id + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n") + + result = self.runner.invoke(cmd_import_gherkin.cli, ["--file", "test.feature", "--project-id", "1"]) + assert result.exit_code == 2 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_invalid_section_id(self, mock_api_client_class, mock_api_handler_class): + """Test with invalid section ID (negative number)""" + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", self.test_feature_path, "--section-id", "-1"], # Invalid: negative + obj=self.environment, + ) + + # Click IntRange validation should catch this + assert result.exit_code == 2 + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_unicode_content(self, mock_api_client_class, mock_api_handler_class): + """Test feature file with unicode characters""" + # Mock API client + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + # Mock API request handler + mock_handler = MagicMock() + mock_api_handler_class.return_value = mock_handler + mock_handler.add_bdd.return_value = ([456], "") + + with self.runner.isolated_filesystem(): + # Create feature file with unicode + with open("unicode.feature", "w", encoding="utf-8") as f: + f.write("Feature: Tëst with ūnīcödé\n Scenario: Test 测试\n Given test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "unicode.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 0 diff --git a/tests/test_cmd_parse_cucumber.py b/tests/test_cmd_parse_cucumber.py new file mode 100644 index 0000000..1c2cfea --- /dev/null +++ b/tests/test_cmd_parse_cucumber.py @@ -0,0 +1,385 @@ +import pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_parse_cucumber + + +class TestCmdParseCucumber: + """Test class for parse_cucumber command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.test_cucumber_path = str(Path(__file__).parent / "test_data" / "CUCUMBER" / "sample_cucumber.json") + + # Set up environment with required parameters + 
self.environment = Environment(cmd="parse_cucumber") + self.environment.host = "https://test.testrail.com" + self.environment.username = "test@example.com" + self.environment.password = "password" + self.environment.project = "Test Project" + self.environment.project_id = 1 + self.environment.auto_creation_response = True # Enable auto-creation for tests + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_workflow1_results_only(self, mock_parser_class, mock_uploader_class): + """Test Workflow 1: Parse and upload results only (no feature upload)""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_parser.parse_file.return_value = [mock_suite] + + # Mock uploader + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 0 + mock_parser.parse_file.assert_called_once() + mock_uploader.upload_results.assert_called_once() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + @patch( + "builtins.open", + new_callable=mock.mock_open, + read_data='[{"name":"Test Feature","elements":[{"type":"scenario","name":"Test Scenario"}]}]', + ) + def test_parse_cucumber_workflow2_upload_feature( + self, mock_open, mock_parser_class, mock_uploader_class, mock_api_handler_class + ): + """Test Workflow 2: Create BDD test cases per feature, then upload results""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + + # Mock suite with test cases + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_section = MagicMock() + mock_section.name = "Test Feature" + mock_case = MagicMock() + mock_case.case_id = None + mock_case.result = MagicMock() + mock_section.testcases = [mock_case] + mock_suite.testsections = [mock_section] + mock_parser.parse_file.return_value = [mock_suite] + + # Mock _generate_feature_content to return Gherkin content + mock_parser._generate_feature_content.return_value = "Feature: Test\n Scenario: Test\n Given test step\n" + + # Mock API handler + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + + mock_api_handler.get_bdd_template_id.return_value = (2, "") # BDD template ID = 2 + mock_api_handler.add_bdd.return_value = ([101], "") # Returns list with case ID = 101 + mock_api_handler.update_case_automation_id.return_value = (True, "") # Success updating automation_id + + # Mock uploader + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 0 + 
mock_api_handler.get_bdd_template_id.assert_called_once() + mock_api_handler.add_bdd.assert_called_once() + mock_api_handler.update_case_automation_id.assert_called_once() + mock_uploader.upload_results.assert_called() + + @pytest.mark.cmd_parse_cucumber + def test_parse_cucumber_upload_feature_requires_section_id(self): + """Test that --upload-feature requires --feature-section-id""" + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + # Missing --feature-section-id + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "feature-section-id is required" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_upload_feature_with_no_flag(self, mock_parser_class, mock_uploader_class): + """Test that -n flag skips test case creation with --upload-feature""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_section = MagicMock() + mock_section.name = "Test Feature" + mock_section.testcases = [] + mock_suite.testsections = [mock_section] + mock_parser.parse_file.return_value = [mock_suite] + + # Mock uploader + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + # Set auto_creation_response to False (simulates -n flag) + self.environment.auto_creation_response = False + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 0 + assert "skipping bdd test case creation" in result.output.lower() + assert "auto-creation disabled" in result.output.lower() + mock_uploader.upload_results.assert_called() + + @pytest.mark.cmd_parse_cucumber + def test_parse_cucumber_missing_file(self): + """Test with non-existent Cucumber JSON file""" + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", "/nonexistent/results.json", "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "not found" in result.output.lower() or result.exception is not None + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_invalid_json(self, mock_parser_class): + """Test with invalid JSON format""" + # Mock parser to raise JSONDecodeError + mock_parser_class.side_effect = json.JSONDecodeError("Invalid JSON", "", 0) + + with self.runner.isolated_filesystem(): + # Create invalid JSON file + with open("invalid.json", "w") as f: + f.write("This is not valid JSON{{{") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", "invalid.json", "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_empty_json(self, mock_parser_class): + """Test with empty JSON file""" + with self.runner.isolated_filesystem(): + # Create empty JSON file + with open("empty.json", "w") as f: + f.write("[]") + + # Mock parser to return empty list + mock_parser = 
MagicMock() + mock_parser_class.return_value = mock_parser + mock_parser.parse_file.return_value = [] + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", "empty.json", "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + # Should handle gracefully (may succeed with warning or fail) + # Exit code depends on implementation + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + @patch("builtins.open", new_callable=mock.mock_open, read_data="[]") + def test_parse_cucumber_invalid_cucumber_json(self, mock_open, mock_parser_class, mock_api_handler_class): + """Test with invalid Cucumber JSON structure (empty array)""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_parser.parse_file.return_value = [mock_suite] + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 1 + # Check that it fails with any appropriate error (either JSON format or parsing error) + assert "invalid cucumber json format" in result.output.lower() or "error parsing" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + @patch( + "builtins.open", + new_callable=mock.mock_open, + read_data='[{"name":"Test Feature","elements":[{"type":"scenario","name":"Test Scenario"}]}]', + ) + def test_parse_cucumber_api_error_during_feature_upload(self, mock_open, mock_parser_class, mock_api_handler_class): + """Test API error during BDD test case creation""" + # Mock parser + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_section = MagicMock() + mock_section.name = "Test Feature" + mock_suite.testsections = [mock_section] + mock_parser.parse_file.return_value = [mock_suite] + mock_parser._generate_feature_content.return_value = "Feature: Test\n Scenario: Test\n" + + # Mock API handler with error + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + # Mock project data resolution + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + + mock_api_handler.get_bdd_template_id.return_value = (2, "") + mock_api_handler.add_bdd.return_value = ([], "API Error: Section not found") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + [ + "--file", + self.test_cucumber_path, + "--suite-id", + "2", + "--upload-feature", + "--feature-section-id", + "456", + "--title", + "Test Run", + ], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "error creating" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + def test_parse_cucumber_required_parameters(self): + """Test that required parameters are validated""" + # Missing --file + result = self.runner.invoke( + cmd_parse_cucumber.cli, ["--project-id", "1", "--suite-id", "2", "--title", "Test Run"] + ) + # Will fail due to missing required params + + # Missing --project-id (handled by check_for_required_parameters) + result = self.runner.invoke( + cmd_parse_cucumber.cli, ["--file", self.test_cucumber_path, "--suite-id", "2", 
"--title", "Test Run"] + ) + # Will fail + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_validation_exception(self, mock_parser_class, mock_uploader_class): + """Test handling of ValidationException""" + from trcli.data_classes.validation_exception import ValidationException + + # Mock parser to raise ValidationException + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_parser.parse_file.side_effect = ValidationException("CucumberParser", "Validation error occurred") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "validation error" in result.output.lower() + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_value_error(self, mock_parser_class, mock_uploader_class): + """Test handling of ValueError during parsing""" + # Mock parser to raise ValueError + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_parser.parse_file.side_effect = ValueError("Invalid Cucumber JSON structure") + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 1 + assert "error parsing" in result.output.lower() diff --git a/tests/test_cmd_parse_gherkin.py b/tests/test_cmd_parse_gherkin.py new file mode 100644 index 0000000..1d6624e --- /dev/null +++ b/tests/test_cmd_parse_gherkin.py @@ -0,0 +1,141 @@ +import pytest +import json +from unittest import mock +from unittest.mock import MagicMock, patch, mock_open +from click.testing import CliRunner +from pathlib import Path + +from trcli.cli import Environment +from trcli.commands import cmd_parse_gherkin +from trcli.readers.gherkin_parser import GherkinParser + + +class TestCmdParseGherkin: + """Test class for parse_gherkin command functionality""" + + def setup_method(self): + """Set up test environment and runner""" + self.runner = CliRunner() + self.test_feature_path = str(Path(__file__).parent / "test_data" / "FEATURE" / "sample_login.feature") + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_success_stdout(self): + """Test successful parsing with output to stdout""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path]) + + assert result.exit_code == 0 + # Output contains logging messages + JSON, extract JSON (starts with '{') + json_start = result.output.find("{") + assert json_start >= 0, "No JSON found in output" + json_str = result.output[json_start:] + output_data = json.loads(json_str) + assert "suites" in output_data + assert "summary" in output_data + assert output_data["summary"]["total_suites"] >= 1 + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_success_with_output_file(self): + """Test successful parsing with output to file""" + with self.runner.isolated_filesystem(): + output_file = "parsed_output.json" + result = self.runner.invoke( + cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--output", output_file] + ) + + assert result.exit_code == 0 + assert "parsed results saved to" in result.output.lower() + + # Verify file was created + with open(output_file, "r") 
as f: + output_data = json.load(f) + assert "suites" in output_data + assert "summary" in output_data + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_pretty_print(self): + """Test parsing with pretty print formatting""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path, "--pretty"]) + + assert result.exit_code == 0 + # Extract JSON from output + json_start = result.output.find("{") + json_str = result.output[json_start:] + output_data = json.loads(json_str) + assert "suites" in output_data + # Check that JSON portion contains newlines and indentation (pretty format) + assert "\n" in json_str + assert " " in json_str # Indentation + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_missing_file(self): + """Test parsing with non-existent file""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", "/nonexistent/file.feature"]) + + # Click returns exit code 2 for invalid parameter (file doesn't exist) + assert result.exit_code in [1, 2] # Either our error handling or Click's + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_invalid_feature_file(self): + """Test parsing with invalid Gherkin syntax""" + with self.runner.isolated_filesystem(): + # Create invalid feature file + invalid_file = "invalid.feature" + with open(invalid_file, "w") as f: + f.write("This is not valid Gherkin syntax at all!!!") + + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", invalid_file]) + + assert result.exit_code == 1 + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_required_file_parameter(self): + """Test that --file parameter is required""" + result = self.runner.invoke(cmd_parse_gherkin.cli, []) + + assert result.exit_code == 2 # Click returns 2 for missing required params + assert "Missing option" in result.output or "required" in result.output.lower() + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_output_structure(self): + """Test that output has correct structure""" + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", self.test_feature_path]) + + assert result.exit_code == 0 + json_start = result.output.find("{") + output_data = json.loads(result.output[json_start:]) + + # Verify top-level structure + assert "suites" in output_data + assert "summary" in output_data + + # Verify summary structure + summary = output_data["summary"] + assert "total_suites" in summary + assert "total_sections" in summary + assert "total_cases" in summary + assert "source_file" in summary + + # Verify suites structure + if output_data["suites"]: + suite = output_data["suites"][0] + assert "name" in suite + assert "source" in suite + assert "testsections" in suite + + if suite["testsections"]: + section = suite["testsections"][0] + assert "name" in section + assert "testcases" in section + + @pytest.mark.cmd_parse_gherkin + def test_parse_gherkin_empty_file(self): + """Test parsing with empty feature file""" + with self.runner.isolated_filesystem(): + empty_file = "empty.feature" + with open(empty_file, "w") as f: + f.write("") + + result = self.runner.invoke(cmd_parse_gherkin.cli, ["--file", empty_file]) + + # Should fail with parsing error + assert result.exit_code == 1 diff --git a/tests/test_cucumber_parser.py b/tests/test_cucumber_parser.py new file mode 100644 index 0000000..03b3388 --- /dev/null +++ b/tests/test_cucumber_parser.py @@ -0,0 +1,256 @@ +import pytest +from pathlib import Path +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser +from 
trcli.readers.cucumber_json import CucumberParser + + +class TestCucumberParser: + """Tests for Cucumber JSON parser""" + + @pytest.fixture + def sample_cucumber_path(self): + """Path to the sample Cucumber JSON file""" + return Path(__file__).parent / "test_data" / "CUCUMBER" / "sample_cucumber.json" + + @pytest.fixture + def environment(self, sample_cucumber_path): + """Create a test environment""" + env = Environment() + env.file = str(sample_cucumber_path) + env.case_matcher = MatchersParser.AUTO + env.suite_name = None + env.verbose = False + return env + + @pytest.mark.parse_cucumber + def test_cucumber_parser_basic(self, environment, sample_cucumber_path): + """Test basic Cucumber JSON parsing""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + assert len(suites) == 1 + suite = suites[0] + + # Check suite structure + assert suite.name == "Cucumber Test Results" + assert len(suite.testsections) == 1 + + # Check section + section = suite.testsections[0] + assert section.name == "User Login" + assert len(section.testcases) == 2 + + @pytest.mark.parse_cucumber + def test_cucumber_parser_scenarios(self, environment): + """Test that scenarios are parsed correctly""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + cases = section.testcases + + # First scenario - passed + case1 = cases[0] + assert "Successful login" in case1.title + assert case1.result.status_id == 1 # Passed + assert len(case1.result.custom_step_results) == 5 + + # Second scenario - failed + case2 = cases[1] + assert "Failed login" in case2.title + assert case2.result.status_id == 5 # Failed + assert len(case2.result.custom_step_results) == 5 + + @pytest.mark.parse_cucumber + def test_cucumber_parser_steps(self, environment): + """Test that steps are parsed with correct status""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check steps + steps = case1.result.custom_step_results + assert all(step.status_id == 1 for step in steps) # All passed + + # Check step content + assert "Given" in steps[0].content + assert "I am on the login page" in steps[0].content + + @pytest.mark.parse_cucumber + def test_cucumber_parser_automation_id(self, environment): + """Test automation ID generation""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check automation ID includes feature name, tags, and scenario name + assert case1.custom_automation_id is not None + assert "User Login" in case1.custom_automation_id + assert "@positive" in case1.custom_automation_id + + @pytest.mark.parse_cucumber + def test_cucumber_parser_tags(self, environment): + """Test that tags are extracted correctly""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check tags in case_fields + assert "tags" in case1.case_fields + tags_str = case1.case_fields["tags"] + assert "@smoke" in tags_str + assert "@authentication" in tags_str + assert "@positive" in tags_str + + @pytest.mark.parse_cucumber + def test_cucumber_generate_feature_file(self, environment): + """Test .feature file generation""" + parser = CucumberParser(environment) + feature_content = parser.generate_feature_file() + + assert feature_content + assert "Feature: User Login" in feature_content + assert "Scenario: 
Successful login" in feature_content + assert "Scenario: Failed login" in feature_content + assert "Given I am on the login page" in feature_content + assert "@smoke" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_parser_elapsed_time(self, environment): + """Test elapsed time calculation""" + parser = CucumberParser(environment) + suites = parser.parse_file() + + section = suites[0].testsections[0] + case1 = section.testcases[0] + + # Check elapsed time is calculated (may be None if very short duration) + # The proper_format_for_elapsed in TestRailResult may strip very small values + if case1.result.elapsed is not None: + assert case1.result.elapsed.endswith("s") + + @pytest.fixture + def advanced_cucumber_path(self): + """Path to the advanced Cucumber JSON file with Background, Examples, and Rules""" + return Path(__file__).parent / "test_data" / "CUCUMBER" / "sample_cucumber_advanced.json" + + @pytest.fixture + def advanced_environment(self, advanced_cucumber_path): + """Create a test environment for advanced features""" + env = Environment() + env.file = str(advanced_cucumber_path) + env.case_matcher = MatchersParser.AUTO + env.suite_name = None + env.verbose = False + return env + + @pytest.mark.parse_cucumber + def test_cucumber_generate_background(self, advanced_environment): + """Test Background element generation in .feature file""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + assert "Background: User is logged in" in feature_content + assert "Given I am logged in as a customer" in feature_content + assert "And my shopping cart is empty" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_generate_scenario_outline_with_examples(self, advanced_environment): + """Test Scenario Outline with Examples table generation""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Check Scenario Outline + assert "Scenario Outline: Add items to cart" in feature_content + + # Check Examples section + assert "Examples:" in feature_content + assert "| quantity | product | price |" in feature_content + assert "| 1 | Laptop | $1000 |" in feature_content + assert "| 2 | Mouse | $40 |" in feature_content + assert "| 3 | Keyboard | $150 |" in feature_content + + # Check Examples tags + assert "@products" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_generate_rule_with_nested_elements(self, advanced_environment): + """Test Rule element with nested Background and Scenario""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Check Rule + assert "Rule: Payment validation" in feature_content + assert "@validation" in feature_content + + # Check nested Background under Rule + assert "Background: Setup payment environment" in feature_content + assert "Given the payment gateway is available" in feature_content + + # Check nested Scenario under Rule + assert "Scenario: Valid credit card payment" in feature_content + assert "When I pay with a valid credit card" in feature_content + assert "Then the payment should be approved" in feature_content + + @pytest.mark.parse_cucumber + def test_cucumber_advanced_feature_structure(self, advanced_environment): + """Test complete feature structure with all elements""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Check feature tags and name + assert "@shopping" in feature_content + 
assert "@cart" in feature_content + assert "Feature: Shopping Cart" in feature_content + + # Check feature description + assert "As a customer" in feature_content + assert "I want to manage my shopping cart" in feature_content + + # Verify proper ordering: Background before Scenarios + background_pos = feature_content.find("Background:") + scenario_outline_pos = feature_content.find("Scenario Outline:") + assert background_pos < scenario_outline_pos, "Background should appear before Scenario Outline" + + @pytest.mark.parse_cucumber + def test_cucumber_multiple_features_in_output(self, advanced_environment): + """Test that multiple features are separated correctly""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + # Should have both features + assert "Feature: Shopping Cart" in feature_content + assert "Feature: Payment Processing" in feature_content + + # Features should be separated by double newline + features = feature_content.split("\n\n") + # Should have at least 2 distinct feature sections + feature_count = feature_content.count("Feature:") + assert feature_count == 2, "Should have exactly 2 features" + + @pytest.mark.parse_cucumber + def test_cucumber_indentation_in_generated_feature(self, advanced_environment): + """Test proper indentation in generated .feature file""" + parser = CucumberParser(advanced_environment) + feature_content = parser.generate_feature_file() + + lines = feature_content.split("\n") + + # Background should be indented with 2 spaces + background_lines = [l for l in lines if "Background:" in l] + assert any(l.startswith(" Background:") for l in background_lines) + + # Steps should be indented with 4 spaces + given_lines = [l for l in lines if l.strip().startswith("Given")] + assert any(l.startswith(" Given") for l in given_lines) + + # Examples should be indented with 4 spaces + examples_lines = [l for l in lines if "Examples:" in l] + assert any(l.startswith(" Examples:") for l in examples_lines) diff --git a/tests/test_data/CUCUMBER/sample_cucumber.json b/tests/test_data/CUCUMBER/sample_cucumber.json new file mode 100644 index 0000000..b1863d2 --- /dev/null +++ b/tests/test_data/CUCUMBER/sample_cucumber.json @@ -0,0 +1,175 @@ +[ + { + "uri": "features/login.feature", + "id": "user-login", + "keyword": "Feature", + "name": "User Login", + "description": " As a user\n I want to log into the application\n So that I can access my account", + "line": 1, + "tags": [ + { + "name": "@smoke", + "line": 1 + }, + { + "name": "@authentication", + "line": 1 + } + ], + "elements": [ + { + "id": "user-login;successful-login-with-valid-credentials", + "keyword": "Scenario", + "name": "Successful login with valid credentials", + "description": "", + "line": 7, + "type": "scenario", + "tags": [ + { + "name": "@positive", + "line": 6 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 8, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter valid username \"testuser\"", + "line": 9, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter valid password \"password123\"", + "line": 10, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + 
"keyword": "And ", + "name": "I click the login button", + "line": 11, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should be redirected to the dashboard", + "line": 12, + "match": { + "location": "step_definitions/login_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 543210987 + } + } + ] + }, + { + "id": "user-login;failed-login-with-invalid-credentials", + "keyword": "Scenario", + "name": "Failed login with invalid credentials", + "description": "", + "line": 15, + "type": "scenario", + "tags": [ + { + "name": "@negative", + "line": 14 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 16, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter invalid username \"baduser\"", + "line": 17, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter invalid password \"wrongpass\"", + "line": 18, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 19, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should see an error message \"Invalid credentials\"", + "line": 20, + "match": { + "location": "step_definitions/login_steps.js:35" + }, + "result": { + "status": "failed", + "duration": 543210987, + "error_message": "AssertionError: expected 'Please try again' to equal 'Invalid credentials'" + } + } + ] + } + ] + } +] diff --git a/tests/test_data/CUCUMBER/sample_cucumber_advanced.json b/tests/test_data/CUCUMBER/sample_cucumber_advanced.json new file mode 100644 index 0000000..19ac15a --- /dev/null +++ b/tests/test_data/CUCUMBER/sample_cucumber_advanced.json @@ -0,0 +1,234 @@ +[ + { + "uri": "features/shopping_cart.feature", + "id": "shopping-cart", + "keyword": "Feature", + "name": "Shopping Cart", + "description": " As a customer\n I want to manage my shopping cart\n So that I can purchase items", + "line": 1, + "tags": [ + { + "name": "@shopping", + "line": 1 + }, + { + "name": "@cart", + "line": 1 + } + ], + "elements": [ + { + "id": "shopping-cart;background", + "keyword": "Background", + "name": "User is logged in", + "description": "", + "line": 5, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "I am logged in as a customer", + "line": 6, + "match": { + "location": "step_definitions/auth_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "my shopping cart is empty", + "line": 7, + "match": { + "location": "step_definitions/cart_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ] + }, + { + "id": "shopping-cart;add-items-to-cart", + "keyword": "Scenario Outline", + "name": "Add items to cart", + "description": "", + "line": 10, + "type": "scenario_outline", + "tags": [ + { + "name": "@positive", + "line": 9 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I add \"\" of \"\" to my cart", + "line": 11, + "match": { + 
"location": "step_definitions/cart_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 2000000000 + } + }, + { + "keyword": "Then ", + "name": "my cart should contain \"\" items", + "line": 12, + "match": { + "location": "step_definitions/cart_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "the total price should be \"\"", + "line": 13, + "match": { + "location": "step_definitions/cart_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ], + "examples": [ + { + "keyword": "Examples", + "name": "Valid products", + "description": "", + "line": 15, + "tags": [ + { + "name": "@products", + "line": 14 + } + ], + "rows": [ + { + "cells": ["quantity", "product", "price"], + "line": 16 + }, + { + "cells": ["1", "Laptop", "$1000"], + "line": 17 + }, + { + "cells": ["2", "Mouse", "$40"], + "line": 18 + }, + { + "cells": ["3", "Keyboard", "$150"], + "line": 19 + } + ] + } + ] + } + ] + }, + { + "uri": "features/payment.feature", + "id": "payment-processing", + "keyword": "Feature", + "name": "Payment Processing", + "description": " Customers can pay using various methods", + "line": 1, + "tags": [ + { + "name": "@payment", + "line": 1 + } + ], + "elements": [ + { + "id": "payment-processing;payment-validation", + "keyword": "Rule", + "name": "Payment validation", + "description": " All payments must be validated before processing", + "line": 5, + "type": "rule", + "tags": [ + { + "name": "@validation", + "line": 4 + } + ], + "children": [ + { + "id": "payment-processing;payment-validation;background", + "keyword": "Background", + "name": "Setup payment environment", + "description": "", + "line": 8, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "the payment gateway is available", + "line": 9, + "match": { + "location": "step_definitions/payment_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 1500000000 + } + } + ] + }, + { + "id": "payment-processing;payment-validation;valid-credit-card", + "keyword": "Scenario", + "name": "Valid credit card payment", + "description": "", + "line": 11, + "type": "scenario", + "tags": [ + { + "name": "@credit-card", + "line": 10 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I pay with a valid credit card", + "line": 12, + "match": { + "location": "step_definitions/payment_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 3000000000 + } + }, + { + "keyword": "Then ", + "name": "the payment should be approved", + "line": 13, + "match": { + "location": "step_definitions/payment_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + } + ] + } + ] + } + ] + } +] diff --git a/tests/test_data/FEATURE/sample_bdd.feature b/tests/test_data/FEATURE/sample_bdd.feature new file mode 100644 index 0000000..945200a --- /dev/null +++ b/tests/test_data/FEATURE/sample_bdd.feature @@ -0,0 +1,23 @@ +@smoke @authentication +Feature: User Authentication + As a user + I want to authenticate securely + So that I can access the system + + @positive @login + Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid username "testuser" + And I enter valid password "password123" + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message + + @negative @login + Scenario: Failed login with invalid password + Given I am on the login page + When I enter valid username 
"testuser" + And I enter invalid password "wrongpass" + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page diff --git a/tests/test_data/FEATURE/sample_login.feature b/tests/test_data/FEATURE/sample_login.feature new file mode 100644 index 0000000..e0287b4 --- /dev/null +++ b/tests/test_data/FEATURE/sample_login.feature @@ -0,0 +1,41 @@ +Feature: User Login + As a registered user + I want to log in to the application + So that I can access my account + + Background: + Given the application is running + And I am on the login page + + @smoke @authentication + Scenario: Successful login with valid credentials + Given I have a valid username "testuser" + And I have a valid password "password123" + When I enter my credentials + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message "Welcome, testuser" + + @negative @authentication + Scenario: Failed login with invalid password + Given I have a valid username "testuser" + And I have an invalid password "wrongpassword" + When I enter my credentials + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page + + @edge-case + Scenario Outline: Login attempts with various credentials + Given I have username "" + And I have password "" + When I enter my credentials + And I click the login button + Then I should see result "" + + Examples: + | username | password | result | + | admin | admin123 | Dashboard | + | testuser | test123 | Dashboard | + | invalid | invalid123 | Invalid credentials | + | empty | | Password required | diff --git a/tests/test_data/XML/bdd_all_fail.xml b/tests/test_data/XML/bdd_all_fail.xml new file mode 100644 index 0000000..af13597 --- /dev/null +++ b/tests/test_data/XML/bdd_all_fail.xml @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/tests/test_data/XML/bdd_all_pass.xml b/tests/test_data/XML/bdd_all_pass.xml new file mode 100644 index 0000000..a6b128e --- /dev/null +++ b/tests/test_data/XML/bdd_all_pass.xml @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/tests/test_data/XML/bdd_inconsistent_case_ids.xml b/tests/test_data/XML/bdd_inconsistent_case_ids.xml new file mode 100644 index 0000000..135fd8a --- /dev/null +++ b/tests/test_data/XML/bdd_inconsistent_case_ids.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/tests/test_data/XML/bdd_mixed_results.xml b/tests/test_data/XML/bdd_mixed_results.xml new file mode 100644 index 0000000..40c6c19 --- /dev/null +++ b/tests/test_data/XML/bdd_mixed_results.xml @@ -0,0 +1,25 @@ + + + + + + + + + + + + + +Expected: Login denied with error "Invalid credentials" +Actual: Login successful +at test_login.py:45 + + + + + + + + + diff --git a/tests/test_data/XML/bdd_no_case_id.xml b/tests/test_data/XML/bdd_no_case_id.xml new file mode 100644 index 0000000..e7987fc --- /dev/null +++ b/tests/test_data/XML/bdd_no_case_id.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/tests/test_data/XML/bdd_valid_testcase_names.xml b/tests/test_data/XML/bdd_valid_testcase_names.xml new file mode 100644 index 0000000..d504870 --- /dev/null +++ b/tests/test_data/XML/bdd_valid_testcase_names.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/tests/test_data/XML/bdd_valid_testsuite_property.xml b/tests/test_data/XML/bdd_valid_testsuite_property.xml new file mode 100644 index 0000000..3d987d2 --- /dev/null +++ b/tests/test_data/XML/bdd_valid_testsuite_property.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + 
+ + + + + + diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index 756feae..cf6f59b 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -62,11 +62,17 @@ "key": "key_from_custom_config", } -trcli_description = ('Supported and loaded modules:\n' - ' - parse_junit: JUnit XML Files (& Similar)\n' - ' - parse_robot: Robot Framework XML Files\n' - ' - parse_openapi: OpenAPI YML Files\n' - ' - add_run: Create a new test run\n' - ' - labels: Manage labels (projects, cases, and tests)\n') +trcli_description = ( + "Supported and loaded modules:\n" + " - parse_junit: JUnit XML Files (& Similar)\n" + " - parse_cucumber: Cucumber JSON results (BDD)\n" + " - import_gherkin: Upload .feature files to TestRail BDD\n" + " - export_gherkin: Export BDD test cases as .feature files\n" + " - parse_robot: Robot Framework XML Files\n" + " - parse_openapi: OpenAPI YML Files\n" + " - add_run: Create a new test run\n" + " - labels: Manage labels (add, update, delete, list)\n" + " - references: Manage references (cases and runs)\n" +) trcli_help_description = "TestRail CLI" diff --git a/tests/test_gherkin_parser.py b/tests/test_gherkin_parser.py new file mode 100644 index 0000000..496a4bc --- /dev/null +++ b/tests/test_gherkin_parser.py @@ -0,0 +1,179 @@ +import pytest +from pathlib import Path +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser +from trcli.readers.gherkin_parser import GherkinParser + + +class TestGherkinParser: + """Tests for Gherkin .feature file parser""" + + @pytest.fixture + def sample_feature_path(self): + """Path to the sample login feature file""" + return Path(__file__).parent / "test_data" / "FEATURE" / "sample_login.feature" + + @pytest.fixture + def environment(self, sample_feature_path): + """Create a test environment""" + env = Environment() + env.file = str(sample_feature_path) + env.case_matcher = MatchersParser.AUTO + env.suite_name = None + env.verbose = False + return env + + @pytest.mark.parse_gherkin + def test_gherkin_parser_sample_file(self, environment, sample_feature_path): + """Test parsing of sample_login.feature""" + # Ensure file exists + assert sample_feature_path.exists(), f"Sample file not found: {sample_feature_path}" + + # Create parser and parse + parser = GherkinParser(environment) + suites = parser.parse_file() + + # Verify structure + assert suites is not None + assert len(suites) == 1, "Should parse into exactly one suite" + + suite = suites[0] + assert suite.name == "User Login" + assert suite.source == "sample_login.feature" + + # Check sections + assert len(suite.testsections) == 1 + section = suite.testsections[0] + assert section.name == "User Login" + + # Check background stored as property + assert section.properties is not None + assert len(section.properties) > 0 + background_prop = section.properties[0] + assert background_prop.name == "background" + assert "the application is running" in background_prop.value + + # Check test cases (should have expanded scenario outline) + # Expected: 2 regular scenarios + 4 scenario outline examples = 6 total + assert len(section.testcases) >= 2, "Should have at least 2 test cases" + + # Verify first test case structure + first_case = section.testcases[0] + assert first_case.title is not None + assert first_case.custom_automation_id is not None + assert first_case.result is not None + assert len(first_case.result.custom_step_results) > 0 + + @pytest.mark.parse_gherkin + def 
test_gherkin_parser_scenario_parsing(self, environment, sample_feature_path): + """Test that scenarios are correctly parsed with steps""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + test_cases = section.testcases + + # Find the "Successful login" scenario + successful_login_case = None + for case in test_cases: + if "Successful login" in case.title: + successful_login_case = case + break + + assert successful_login_case is not None, "Should find 'Successful login' test case" + + # Verify steps + steps = successful_login_case.result.custom_step_results + assert len(steps) == 6, "Successful login scenario should have 6 steps" + + # Check first step + first_step = steps[0] + assert "Given" in first_step.content + assert "valid username" in first_step.content + + @pytest.mark.parse_gherkin + def test_gherkin_parser_tags_in_automation_id(self, environment, sample_feature_path): + """Test that tags are included in automation ID""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + test_cases = section.testcases + + # Find a case with tags + tagged_case = None + for case in test_cases: + if "@smoke" in case.custom_automation_id or "@authentication" in case.custom_automation_id: + tagged_case = case + break + + assert tagged_case is not None, "Should find a test case with tags in automation_id" + assert "@" in tagged_case.custom_automation_id, "Automation ID should contain tags" + + @pytest.mark.parse_gherkin + def test_gherkin_parser_scenario_outline_expansion(self, environment, sample_feature_path): + """Test that Scenario Outlines are expanded into multiple test cases""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = suites[0] + section = suite.testsections[0] + test_cases = section.testcases + + # Find scenario outline examples + outline_examples = [case for case in test_cases if "Example" in case.title] + + assert len(outline_examples) >= 4, "Should have at least 4 example cases from Scenario Outline" + + # Verify example case has parameters + example_case = outline_examples[0] + assert "example_params" in example_case.case_fields + assert example_case.result is not None + + @pytest.mark.parse_gherkin + def test_gherkin_parser_with_custom_suite_name(self, environment, sample_feature_path): + """Test parser with custom suite name""" + environment.suite_name = "Custom Suite Name" + + parser = GherkinParser(environment) + suites = parser.parse_file() + + assert suites[0].name == "Custom Suite Name" + + @pytest.mark.parse_gherkin + def test_gherkin_parser_case_matcher_name(self, environment, sample_feature_path): + """Test parser with NAME case matcher""" + environment.case_matcher = MatchersParser.NAME + + parser = GherkinParser(environment) + suites = parser.parse_file() + + # Should parse without errors + assert suites is not None + assert len(suites) == 1 + + @pytest.mark.parse_gherkin + def test_gherkin_parser_missing_file(self): + """Test parser with non-existent file""" + env = Environment() + env.file = "nonexistent.feature" + env.case_matcher = MatchersParser.AUTO + + with pytest.raises(FileNotFoundError): + parser = GherkinParser(env) + + @pytest.mark.parse_gherkin + def test_gherkin_parser_all_steps_untested(self, environment, sample_feature_path): + """Test that all steps are marked as untested by default""" + parser = GherkinParser(environment) + suites = parser.parse_file() + + suite = 
suites[0] + section = suite.testsections[0] + + for test_case in section.testcases: + assert test_case.result.status_id == 3, "Result status should be 3 (Untested)" + for step in test_case.result.custom_step_results: + assert step.status_id == 3, "All steps should be untested (status_id=3)" diff --git a/tests/test_junit_bdd_parser.py b/tests/test_junit_bdd_parser.py new file mode 100644 index 0000000..b9b798f --- /dev/null +++ b/tests/test_junit_bdd_parser.py @@ -0,0 +1,477 @@ +""" +Unit tests for BDD-specific JUnit parsing functionality + +Tests the --special-parser bdd mode that groups multiple scenarios +into a single TestRail BDD test case. +""" + +import pytest +from unittest.mock import Mock, MagicMock, patch +from pathlib import Path + +from trcli.cli import Environment +from trcli.readers.junit_xml import JunitParser +from trcli.data_classes.validation_exception import ValidationException + + +class TestBDDJunitParser: + """Test BDD mode for JUnit parser""" + + @pytest.fixture + def environment(self): + """Create mock environment for BDD mode""" + env = Mock(spec=Environment) + env.case_matcher = "auto" + env.special_parser = "bdd" + env.suite_name = None + env.file = None # Required by FileParser + env.params_from_config = {} # Required by JunitParser for custom statuses + env.log = Mock() + env.elog = Mock() + env.vlog = Mock() + return env + + @pytest.fixture + def mock_api_validation_success(self): + """Mock successful API validation (case exists and is BDD)""" + with patch("trcli.api.project_based_client.ProjectBasedClient") as mock_client_class: + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + # Mock successful get_case response with BDD field + mock_response.error_message = "" + mock_response.response_text = { + "id": 42, + "title": "User Enrollment", + "template_id": 4, + "custom_testrail_bdd_scenario": '[{"content":"Scenario 1"}]', + } + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + yield mock_client + + def test_bdd_mode_detection(self, environment): + """Test that BDD mode is correctly detected""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + assert parser._is_bdd_mode() is True + + def test_standard_mode_detection(self, environment): + """Test that standard mode is detected when not BDD""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + environment.special_parser = "junit" + parser = JunitParser(environment) + assert parser._is_bdd_mode() is False + + def test_extract_case_id_from_testsuite_property(self, environment): + """Test extracting case ID from testsuite property""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + # Parse and check case ID extraction + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id = parser._extract_feature_case_id_from_property(testsuite) + assert case_id == 42 + + def test_extract_case_id_from_testcase_names(self, environment): + """Test extracting case ID from testcase names""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testcase_names.xml" + environment.file = 
str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + testcase_ids = parser._extract_case_id_from_testcases(testsuite) + assert len(testcase_ids) == 3 + assert all(case_id == 42 for _, case_id in testcase_ids) + + def test_validate_consistent_case_ids(self, environment): + """Test validation passes when all scenarios have same case ID""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testcase_names.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id, errors = parser._extract_and_validate_bdd_case_id(testsuite) + assert case_id == 42 + assert len(errors) == 0 + + def test_validate_inconsistent_case_ids_error(self, environment): + """Test validation fails when scenarios have different case IDs""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_inconsistent_case_ids.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id, errors = parser._extract_and_validate_bdd_case_id(testsuite) + assert case_id is None + assert len(errors) == 1 + assert "Multiple different case IDs" in errors[0] + assert "123" in errors[0] and "124" in errors[0] and "125" in errors[0] + + def test_validate_no_case_id_error(self, environment): + """Test validation fails when no case ID found""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_no_case_id.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + case_id, errors = parser._extract_and_validate_bdd_case_id(testsuite) + assert case_id is None + assert len(errors) == 1 + assert "No case ID found" in errors[0] + + def test_aggregate_all_pass(self, environment): + """Test status aggregation when all scenarios pass""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [1, 1, 1] # All passed + result = parser._aggregate_scenario_statuses(statuses) + assert result == 1 # Passed + + def test_aggregate_one_fail(self, environment): + """Test status aggregation when one scenario fails (fail-fast)""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [1, 5, 1] # One failed + result = parser._aggregate_scenario_statuses(statuses) + assert result == 5 # Failed + + def test_aggregate_all_skip(self, environment): + """Test status aggregation when all scenarios skipped""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [4, 4, 4] # All skipped + result = parser._aggregate_scenario_statuses(statuses) + assert result == 4 # Skipped + + def test_aggregate_pass_and_skip(self, environment): + """Test status aggregation with pass and skip (no fails)""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [1, 4, 1] # Mixed pass/skip + result = 
parser._aggregate_scenario_statuses(statuses) + assert result == 4 # Skipped (since some not executed) + + def test_aggregate_fail_and_skip(self, environment): + """Test status aggregation with fail and skip""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + statuses = [5, 4, 1] # Mixed fail/skip/pass + result = parser._aggregate_scenario_statuses(statuses) + assert result == 5 # Failed (failure takes precedence) + + def test_format_failure_message(self, environment): + """Test failure message formatting""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + mock_result = Mock() + mock_result.type = "AssertionError" + mock_result.message = "Expected X but got Y" + mock_result.text = "Details about failure" + + message = parser._format_failure_message("Test Scenario", mock_result) + + assert "Scenario: Test Scenario" in message + assert "Type: AssertionError" in message + assert "Message: Expected X but got Y" in message + assert "Details:\n Details about failure" in message + + def test_format_failure_message_truncation(self, environment): + """Test failure message truncates long text""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + mock_result = Mock() + mock_result.type = "Error" + mock_result.message = "Error" + mock_result.text = "A" * 600 # Long text + + message = parser._format_failure_message("Test", mock_result) + assert "... (truncated)" in message + assert len(message) < 700 # Should be truncated + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_validate_case_exists_success(self, mock_client_class, environment): + """Test validation passes when case exists and is BDD""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "" + mock_response.response_text = { + "id": 42, + "title": "Test Feature", + "custom_testrail_bdd_scenario": '[{"content":"..."}]', + } + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + is_valid, error_msg, case_data = parser._validate_bdd_case_exists(42, "Test Feature") + + assert is_valid is True + assert error_msg == "" + assert case_data["id"] == 42 + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_validate_case_not_exists(self, mock_client_class, environment): + """Test validation fails when case doesn't exist""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "Field :case_id is not a valid test case." 
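+ # Simulates the error TestRail returns when get_case is called with an unknown case ID; the parser is expected to surface it as a "does not exist" validation error (asserted below).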
+ + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + is_valid, error_msg, case_data = parser._validate_bdd_case_exists(999, "Test Feature") + + assert is_valid is False + assert "does not exist" in error_msg + assert "C999" in error_msg + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_validate_case_not_bdd(self, mock_client_class, environment): + """Test validation fails when case is not BDD template""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "" + mock_response.response_text = { + "id": 42, + "title": "Regular Test Case", + "custom_testrail_bdd_scenario": None, # Not a BDD case + } + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + is_valid, error_msg, case_data = parser._validate_bdd_case_exists(42, "Test Feature") + + assert is_valid is False + assert "is NOT a BDD test case" in error_msg + assert "custom_testrail_bdd_scenario" in error_msg + + def test_parse_bdd_feature_all_pass(self, environment, mock_api_validation_success): + """Test parsing BDD feature with all scenarios passing""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_all_pass.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + # Mock the case ID to match test data + mock_api_validation_success.api_request_handler.client.send_get.return_value.response_text["id"] = 100 + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is not None + assert test_case.case_id == 100 + assert test_case.result.status_id == 1 # Passed + assert len(test_case.result.custom_step_results) == 2 + assert "Total Scenarios: 2" in test_case.result.comment + assert "Passed: 2" in test_case.result.comment + + def test_parse_bdd_feature_mixed_results(self, environment, mock_api_validation_success): + """Test parsing BDD feature with mixed results (pass/fail/skip)""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_mixed_results.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + # Mock the case ID + mock_api_validation_success.api_request_handler.client.send_get.return_value.response_text["id"] = 25293 + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is not None + assert test_case.case_id == 25293 + assert test_case.result.status_id == 5 # Failed (fail-fast) + assert len(test_case.result.custom_step_results) == 3 + + # Check step statuses + assert test_case.result.custom_step_results[0].status_id == 1 # Passed + assert test_case.result.custom_step_results[1].status_id == 5 # Failed + assert test_case.result.custom_step_results[2].status_id == 4 # Skipped + + # Check comment contains summary and failure details + assert "Total Scenarios: 3" in test_case.result.comment + assert "Passed: 1" in test_case.result.comment + assert "Failed: 1" in 
test_case.result.comment + assert "Skipped: 1" in test_case.result.comment + assert "Failure Details:" in test_case.result.comment + assert "Invalid password" in test_case.result.comment + + def test_parse_bdd_feature_no_case_id_returns_none(self, environment): + """Test that parsing returns None when no case ID found""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_no_case_id.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is None + environment.elog.assert_called() + + def test_parse_bdd_feature_inconsistent_ids_returns_none(self, environment): + """Test that parsing returns None when case IDs are inconsistent""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_inconsistent_case_ids.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case is None + environment.elog.assert_called() + + @patch("trcli.api.project_based_client.ProjectBasedClient") + def test_parse_bdd_feature_case_not_exists_raises_exception(self, mock_client_class, environment): + """Test that parsing raises ValidationException when case doesn't exist""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + + mock_client = MagicMock() + mock_api_handler = MagicMock() + mock_response = MagicMock() + + mock_response.error_message = "Case not found" + + mock_api_handler.client.send_get.return_value = mock_response + mock_client.api_request_handler = mock_api_handler + mock_client_class.return_value = mock_client + + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + with pytest.raises(ValidationException) as exc_info: + parser._parse_bdd_feature_as_single_case(testsuite) + + assert "case_id" in str(exc_info.value.field_name) + assert "BDD Feature" in str(exc_info.value.class_name) + + def test_parse_sections_bdd_mode(self, environment, mock_api_validation_success): + """Test that _parse_sections uses BDD mode when enabled""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testsuite_property.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file, parse_func=parser._add_root_element_to_tree) + + sections = parser._parse_sections(suite) + + assert len(sections) == 1 + assert len(sections[0].testcases) == 1 # One BDD test case + assert sections[0].testcases[0].case_id == 42 + + def test_parse_sections_standard_mode(self, environment): + """Test that _parse_sections uses standard mode when BDD not enabled""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_valid_testcase_names.xml" + environment.file = str(test_file) + environment.special_parser = "junit" # Standard mode + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file, parse_func=parser._add_root_element_to_tree) + + sections = parser._parse_sections(suite) + + assert len(sections) == 1 + # In standard mode, should have 3 separate test cases + 
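+ # (one TestRail case per JUnit testcase element, unlike BDD mode above where the whole testsuite collapses into a single aggregated case)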
assert len(sections[0].testcases) == 3 + + def test_elapsed_time_calculation(self, environment, mock_api_validation_success): + """Test that elapsed time is summed correctly from all scenarios""" + test_file = Path(__file__).parent / "test_data" / "XML" / "bdd_mixed_results.xml" + environment.file = str(test_file) + parser = JunitParser(environment) + + from junitparser import JUnitXml + + suite = JUnitXml.fromfile(test_file) + testsuite = list(suite)[0] + + mock_api_validation_success.api_request_handler.client.send_get.return_value.response_text["id"] = 25293 + + test_case = parser._parse_bdd_feature_as_single_case(testsuite) + + assert test_case.result.elapsed == "2s" # 1.0 + 1.5 + 0.0 = 2.5, rounds to 2 (banker's rounding) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_project_based_client.py b/tests/test_project_based_client.py index a0efc00..016495a 100644 --- a/tests/test_project_based_client.py +++ b/tests/test_project_based_client.py @@ -26,26 +26,21 @@ def project_based_client_data_provider(self, mocker): environment.file = "results.xml" environment.case_matcher = MatchersParser.AUTO - api_request_handler = mocker.patch( - "trcli.api.project_based_client.ApiRequestHandler" - ) + api_request_handler = mocker.patch("trcli.api.project_based_client.ApiRequestHandler") api_request_handler.get_project_data.return_value = ProjectData( project_id=environment.project_id, suite_mode=1, error_message="" ) api_request_handler.check_automation_id_field.return_value = None project_based_client = ProjectBasedClient( - environment=environment, suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_based_client.api_request_handler = api_request_handler yield environment, api_request_handler, project_based_client @pytest.mark.project_based_client - @pytest.mark.parametrize( - "timeout", [40, None], ids=["with_timeout", "without_timeout"] - ) - def test_instantiate_api_client( - self, timeout, project_based_client_data_provider, mocker - ): + @pytest.mark.parametrize("timeout", [40, None], ids=["with_timeout", "without_timeout"]) + def test_instantiate_api_client(self, timeout, project_based_client_data_provider, mocker): """The purpose of this test is to check that APIClient was instantiated properly and credential fields were set es expected.""" (_, api_request_handler, _) = project_based_client_data_provider @@ -57,24 +52,22 @@ def test_instantiate_api_client( environment.key = "test_api_key" if timeout: environment.timeout = timeout - timeout_expected_result = 30 if not timeout else timeout - project_based_client = ProjectBasedClient( - environment=environment, suite=junit_file_parser - ) + timeout_expected_result = 60 if not timeout else timeout + project_based_client = ProjectBasedClient(environment=environment, suite=junit_file_parser) api_client = project_based_client.instantiate_api_client() assert ( - api_client.username == environment.username + api_client.username == environment.username ), f"Expected username to be set to: {environment.username}, but got: {api_client.username} instead." assert ( - api_client.password == environment.password + api_client.password == environment.password ), f"Expected password to be set to: {environment.password}, but got: {api_client.password} instead." 
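+ # The api_key and timeout assertions below follow the same pattern; with no timeout configured, the expected client default is 60 seconds.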
assert ( - api_client.api_key == environment.key + api_client.api_key == environment.key ), f"Expected api_key to be set to: {environment.key}, but got: {api_client.api_key} instead." assert ( - api_client.timeout == timeout_expected_result + api_client.timeout == timeout_expected_result ), f"Expected timeout to be set to: {timeout_expected_result}, but got: {api_client.timeout} instead." def test_resolve_project(self, project_based_client_data_provider): @@ -87,10 +80,10 @@ def test_resolve_project(self, project_based_client_data_provider): ) = project_based_client_data_provider project_based_client.resolve_project() - assert ( - project_based_client.project.project_id == environment.project_id - ), (f"Expected project_based_client.project to have {environment.project_id}," - f" but had {project_based_client.project.project_id}") + assert project_based_client.project.project_id == environment.project_id, ( + f"Expected project_based_client.project to have {environment.project_id}," + f" but had {project_based_client.project.project_id}" + ) @pytest.mark.project_based_client def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider): @@ -109,14 +102,10 @@ def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider) suite_mode=SuiteModes.single_suite ) + assert result_suite_id == suite_id, f"Expected suite_id: {suite_id} but got {result_suite_id} instead." + assert suite_added is False, f"Expected suite_added: {False} but got {suite_added} instead." assert ( - result_suite_id == suite_id - ), f"Expected suite_id: {suite_id} but got {result_suite_id} instead." - assert ( - suite_added is False - ), f"Expected suite_added: {False} but got {suite_added} instead." - assert ( - result_return_code == result_code + result_return_code == result_code ), f"Expected suite_id: {result_code} but got {result_return_code} instead." @pytest.mark.project_based_client @@ -126,14 +115,14 @@ def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider) ids=TEST_GET_SUITE_ID_PROMPTS_USER_IDS, ) def test_get_suite_id_multiple_suites_mode( - self, - user_response, - expected_suite_id, - expected_result_code, - expected_message, - suite_add_error, - project_based_client_data_provider, - mocker, + self, + user_response, + expected_suite_id, + expected_result_code, + expected_message, + suite_add_error, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check that user will be prompted to add suite is one is missing in TestRail. Depending on user response either information about addition of missing suite or error message @@ -160,9 +149,7 @@ def test_get_suite_id_multiple_suites_mode( else: project_based_client.api_request_handler.add_suites.return_value = ( [{"suite_id": expected_suite_id, "name": suite_name}], - FAULT_MAPPING["error_while_adding_suite"].format( - error_message="Failed to add suite." - ), + FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite."), ) project_based_client.api_request_handler.suites_data_from_provider.suite_id = None project_based_client.api_request_handler.suites_data_from_provider.name = suite_name @@ -177,18 +164,14 @@ def test_get_suite_id_multiple_suites_mode( if suite_add_error: expected_elog_calls.append( - mocker.call( - FAULT_MAPPING["error_while_adding_suite"].format( - error_message="Failed to add suite." 
- ) - ) + mocker.call(FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite.")) ) assert ( - expected_suite_id == result_suite_id + expected_suite_id == result_suite_id ), f"Expected suite_id: {expected_suite_id} but got {result_suite_id} instead." assert ( - expected_result_code == result_code + expected_result_code == result_code ), f"Expected suite_id: {expected_result_code} but got {result_code} instead." environment.get_prompt_response_for_auto_creation.assert_called_with( PROMPT_MESSAGES["create_new_suite"].format( @@ -197,9 +180,7 @@ def test_get_suite_id_multiple_suites_mode( ) ) if user_response: - project_based_client.api_request_handler.add_suites.assert_called_with( - project_id=project_id - ) + project_based_client.api_request_handler.add_suites.assert_called_with(project_id=project_id) environment.log.assert_has_calls(expected_log_calls) environment.elog.assert_has_calls(expected_elog_calls) @@ -210,13 +191,13 @@ def test_get_suite_id_multiple_suites_mode( ids=["get_suite_ids succeeds", "get_suite_ids fails"], ) def test_get_suite_id_single_suite_mode( - self, - suite_ids, - error_message, - expected_suite_id, - expected_result_code, - project_based_client_data_provider, - mocker, + self, + suite_ids, + error_message, + expected_suite_id, + expected_result_code, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check flow of get_suite_id_log_error function for single suite mode.""" @@ -238,10 +219,10 @@ def test_get_suite_id_single_suite_mode( result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." if error_message: environment.elog.assert_has_calls(expected_elog_calls) @@ -253,13 +234,13 @@ def test_get_suite_id_single_suite_mode( ids=TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_IDS, ) def test_get_suite_id_single_suite_mode_baselines( - self, - get_suite_ids_result, - expected_suite_id, - expected_result_code, - expected_error_message, - project_based_client_data_provider, - mocker, + self, + get_suite_ids_result, + expected_suite_id, + expected_result_code, + expected_error_message, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check flow of get_suite_id_log_error function for single suite with baselines mode.""" @@ -271,26 +252,22 @@ def test_get_suite_id_single_suite_mode_baselines( suite_mode = SuiteModes.single_suite_baselines project_based_client.api_request_handler.resolve_suite_id_using_name.return_value = (-1, "Any Error") project_based_client.api_request_handler.suites_data_from_provider.suite_id = None - project_based_client.api_request_handler.get_suite_ids.return_value = ( - get_suite_ids_result - ) + project_based_client.api_request_handler.get_suite_ids.return_value = get_suite_ids_result expected_elog_calls = [] if expected_error_message: expected_elog_calls = [mocker.call(expected_error_message)] result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." 
assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." environment.elog.assert_has_calls(expected_elog_calls) @pytest.mark.project_based_client - def test_get_suite_id_unknown_suite_mode( - self, project_based_client_data_provider, mocker - ): + def test_get_suite_id_unknown_suite_mode(self, project_based_client_data_provider, mocker): """The purpose of this test is to check that get_suite_id will return -1 and print proper message when unknown suite mode will be returned during execution.""" ( @@ -302,18 +279,14 @@ def test_get_suite_id_unknown_suite_mode( expected_result_code = -1 expected_suite_id = -1 project_based_client.api_request_handler.suites_data_from_provider.suite_id = None - expected_elog_calls = [ - mocker.call( - FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode) - ) - ] + expected_elog_calls = [mocker.call(FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode))] result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." environment.elog.assert_has_calls(expected_elog_calls) @@ -333,13 +306,11 @@ def test_check_suite_id_returns_id(self, project_based_client_data_provider): result_code = project_based_client.check_suite_id(project_id=project_id) assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected to get {result_code} as result code, but got {expected_result_code} instead." @pytest.mark.project_based_client - def test_check_suite_id_prints_error_message( - self, project_based_client_data_provider, mocker - ): + def test_check_suite_id_prints_error_message(self, project_based_client_data_provider, mocker): """The purpose of this test is to check that proper message would be printed to the user and program will quit when suite ID is not present in TestRail.""" ( @@ -356,13 +327,11 @@ def test_check_suite_id_prints_error_message( ) result_code = project_based_client.check_suite_id(project_id=project_id) - expected_elog_calls = [ - mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) - ] + expected_elog_calls = [mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))] environment.elog.assert_has_calls(expected_elog_calls) assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected to get {expected_result_code} as result code, but got {result_code} instead." def test_resolve_suite_returns_valid_id(self, project_based_client_data_provider): @@ -377,9 +346,7 @@ def test_resolve_suite_returns_valid_id(self, project_based_client_data_provider project_based_client.resolve_project() suite_id, suite_added = project_based_client.resolve_suite() - assert ( - suite_id == 1 - ), f"Expected suite id 1 but got {suite_id} instead." + assert suite_id == 1, f"Expected suite id 1 but got {suite_id} instead." 
def test_create_or_update_test_run_calls_add_run(self, project_based_client_data_provider): """The purpose of this test is to check that calling the method without a run_id in the environment causes @@ -396,12 +363,8 @@ def test_create_or_update_test_run_calls_add_run(self, project_based_client_data run_id, error_message = project_based_client.create_or_update_test_run() project_based_client.api_request_handler.add_run.assert_called_once() - assert ( - run_id == 1 - ), f"Expected run_id to be 1 but got {run_id} instead." - assert ( - error_message == "" - ), f"Expected error message to be None but got {error_message} instead." + assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead." + assert error_message == "", f"Expected error message to be None but got {error_message} instead." def test_create_or_update_test_run_calls_update_run(self, project_based_client_data_provider): """The purpose of this test is to check that calling the method with a run_id in the environment causes @@ -418,12 +381,8 @@ def test_create_or_update_test_run_calls_update_run(self, project_based_client_d run_id, error_message = project_based_client.create_or_update_test_run() api_request_handler.update_run.assert_called_once() - assert ( - run_id == 1 - ), f"Expected run_id to be 1 but got {run_id} instead." - assert ( - error_message == "" - ), f"Expected error message to be None but got {error_message} instead." + assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead." + assert error_message == "", f"Expected error message to be None but got {error_message} instead." def test_get_project_id(self, project_based_client_data_provider): """The purpose of this test is to check that the _get_project_id() will fall back to the environment.project_id @@ -434,7 +393,7 @@ def test_get_project_id(self, project_based_client_data_provider): project_based_client, ) = project_based_client_data_provider - assert ( - project_based_client._get_project_id() == environment.project_id - ), (f"Expected to get {environment.project_id} from project_based_client.get_project_id but got" - f" {project_based_client._get_project_id()} instead.") + assert project_based_client._get_project_id() == environment.project_id, ( + f"Expected to get {environment.project_id} from project_based_client.get_project_id but got" + f" {project_based_client._get_project_id()} instead." 
+ ) diff --git a/tests_e2e/reports_cucumber/sample_cucumber.json b/tests_e2e/reports_cucumber/sample_cucumber.json new file mode 100644 index 0000000..b1863d2 --- /dev/null +++ b/tests_e2e/reports_cucumber/sample_cucumber.json @@ -0,0 +1,175 @@ +[ + { + "uri": "features/login.feature", + "id": "user-login", + "keyword": "Feature", + "name": "User Login", + "description": " As a user\n I want to log into the application\n So that I can access my account", + "line": 1, + "tags": [ + { + "name": "@smoke", + "line": 1 + }, + { + "name": "@authentication", + "line": 1 + } + ], + "elements": [ + { + "id": "user-login;successful-login-with-valid-credentials", + "keyword": "Scenario", + "name": "Successful login with valid credentials", + "description": "", + "line": 7, + "type": "scenario", + "tags": [ + { + "name": "@positive", + "line": 6 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 8, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter valid username \"testuser\"", + "line": 9, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter valid password \"password123\"", + "line": 10, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 11, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should be redirected to the dashboard", + "line": 12, + "match": { + "location": "step_definitions/login_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 543210987 + } + } + ] + }, + { + "id": "user-login;failed-login-with-invalid-credentials", + "keyword": "Scenario", + "name": "Failed login with invalid credentials", + "description": "", + "line": 15, + "type": "scenario", + "tags": [ + { + "name": "@negative", + "line": 14 + } + ], + "steps": [ + { + "keyword": "Given ", + "name": "I am on the login page", + "line": 16, + "match": { + "location": "step_definitions/login_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1234567890 + } + }, + { + "keyword": "When ", + "name": "I enter invalid username \"baduser\"", + "line": 17, + "match": { + "location": "step_definitions/login_steps.js:15" + }, + "result": { + "status": "passed", + "duration": 987654321 + } + }, + { + "keyword": "And ", + "name": "I enter invalid password \"wrongpass\"", + "line": 18, + "match": { + "location": "step_definitions/login_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 876543210 + } + }, + { + "keyword": "And ", + "name": "I click the login button", + "line": 19, + "match": { + "location": "step_definitions/login_steps.js:25" + }, + "result": { + "status": "passed", + "duration": 2345678901 + } + }, + { + "keyword": "Then ", + "name": "I should see an error message \"Invalid credentials\"", + "line": 20, + "match": { + "location": "step_definitions/login_steps.js:35" + }, + "result": { + "status": "failed", + "duration": 543210987, + "error_message": "AssertionError: expected 'Please try again' to equal 'Invalid credentials'" + } + } + ] + } + ] + } +] diff --git 
a/tests_e2e/reports_cucumber/sample_cucumber_advanced.json b/tests_e2e/reports_cucumber/sample_cucumber_advanced.json new file mode 100644 index 0000000..19ac15a --- /dev/null +++ b/tests_e2e/reports_cucumber/sample_cucumber_advanced.json @@ -0,0 +1,234 @@ +[ + { + "uri": "features/shopping_cart.feature", + "id": "shopping-cart", + "keyword": "Feature", + "name": "Shopping Cart", + "description": " As a customer\n I want to manage my shopping cart\n So that I can purchase items", + "line": 1, + "tags": [ + { + "name": "@shopping", + "line": 1 + }, + { + "name": "@cart", + "line": 1 + } + ], + "elements": [ + { + "id": "shopping-cart;background", + "keyword": "Background", + "name": "User is logged in", + "description": "", + "line": 5, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "I am logged in as a customer", + "line": 6, + "match": { + "location": "step_definitions/auth_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "my shopping cart is empty", + "line": 7, + "match": { + "location": "step_definitions/cart_steps.js:5" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ] + }, + { + "id": "shopping-cart;add-items-to-cart", + "keyword": "Scenario Outline", + "name": "Add items to cart", + "description": "", + "line": 10, + "type": "scenario_outline", + "tags": [ + { + "name": "@positive", + "line": 9 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I add \"\" of \"\" to my cart", + "line": 11, + "match": { + "location": "step_definitions/cart_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 2000000000 + } + }, + { + "keyword": "Then ", + "name": "my cart should contain \"\" items", + "line": 12, + "match": { + "location": "step_definitions/cart_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + }, + { + "keyword": "And ", + "name": "the total price should be \"\"", + "line": 13, + "match": { + "location": "step_definitions/cart_steps.js:30" + }, + "result": { + "status": "passed", + "duration": 500000000 + } + } + ], + "examples": [ + { + "keyword": "Examples", + "name": "Valid products", + "description": "", + "line": 15, + "tags": [ + { + "name": "@products", + "line": 14 + } + ], + "rows": [ + { + "cells": ["quantity", "product", "price"], + "line": 16 + }, + { + "cells": ["1", "Laptop", "$1000"], + "line": 17 + }, + { + "cells": ["2", "Mouse", "$40"], + "line": 18 + }, + { + "cells": ["3", "Keyboard", "$150"], + "line": 19 + } + ] + } + ] + } + ] + }, + { + "uri": "features/payment.feature", + "id": "payment-processing", + "keyword": "Feature", + "name": "Payment Processing", + "description": " Customers can pay using various methods", + "line": 1, + "tags": [ + { + "name": "@payment", + "line": 1 + } + ], + "elements": [ + { + "id": "payment-processing;payment-validation", + "keyword": "Rule", + "name": "Payment validation", + "description": " All payments must be validated before processing", + "line": 5, + "type": "rule", + "tags": [ + { + "name": "@validation", + "line": 4 + } + ], + "children": [ + { + "id": "payment-processing;payment-validation;background", + "keyword": "Background", + "name": "Setup payment environment", + "description": "", + "line": 8, + "type": "background", + "steps": [ + { + "keyword": "Given ", + "name": "the payment gateway is available", + "line": 9, + "match": { + "location": "step_definitions/payment_steps.js:5" + }, + "result": { + "status": "passed", + 
"duration": 1500000000 + } + } + ] + }, + { + "id": "payment-processing;payment-validation;valid-credit-card", + "keyword": "Scenario", + "name": "Valid credit card payment", + "description": "", + "line": 11, + "type": "scenario", + "tags": [ + { + "name": "@credit-card", + "line": 10 + } + ], + "steps": [ + { + "keyword": "When ", + "name": "I pay with a valid credit card", + "line": 12, + "match": { + "location": "step_definitions/payment_steps.js:10" + }, + "result": { + "status": "passed", + "duration": 3000000000 + } + }, + { + "keyword": "Then ", + "name": "the payment should be approved", + "line": 13, + "match": { + "location": "step_definitions/payment_steps.js:20" + }, + "result": { + "status": "passed", + "duration": 1000000000 + } + } + ] + } + ] + } + ] + } +] diff --git a/tests_e2e/reports_gherkin/sample_bdd.feature b/tests_e2e/reports_gherkin/sample_bdd.feature new file mode 100644 index 0000000..945200a --- /dev/null +++ b/tests_e2e/reports_gherkin/sample_bdd.feature @@ -0,0 +1,23 @@ +@smoke @authentication +Feature: User Authentication + As a user + I want to authenticate securely + So that I can access the system + + @positive @login + Scenario: Successful login with valid credentials + Given I am on the login page + When I enter valid username "testuser" + And I enter valid password "password123" + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message + + @negative @login + Scenario: Failed login with invalid password + Given I am on the login page + When I enter valid username "testuser" + And I enter invalid password "wrongpass" + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page diff --git a/tests_e2e/reports_gherkin/sample_login.feature b/tests_e2e/reports_gherkin/sample_login.feature new file mode 100644 index 0000000..e0287b4 --- /dev/null +++ b/tests_e2e/reports_gherkin/sample_login.feature @@ -0,0 +1,41 @@ +Feature: User Login + As a registered user + I want to log in to the application + So that I can access my account + + Background: + Given the application is running + And I am on the login page + + @smoke @authentication + Scenario: Successful login with valid credentials + Given I have a valid username "testuser" + And I have a valid password "password123" + When I enter my credentials + And I click the login button + Then I should be redirected to the dashboard + And I should see a welcome message "Welcome, testuser" + + @negative @authentication + Scenario: Failed login with invalid password + Given I have a valid username "testuser" + And I have an invalid password "wrongpassword" + When I enter my credentials + And I click the login button + Then I should see an error message "Invalid credentials" + And I should remain on the login page + + @edge-case + Scenario Outline: Login attempts with various credentials + Given I have username "" + And I have password "" + When I enter my credentials + And I click the login button + Then I should see result "" + + Examples: + | username | password | result | + | admin | admin123 | Dashboard | + | testuser | test123 | Dashboard | + | invalid | invalid123 | Invalid credentials | + | empty | | Password required | diff --git a/tests_e2e/test_end2end.py b/tests_e2e/test_end2end.py index 2879972..bd785a0 100644 --- a/tests_e2e/test_end2end.py +++ b/tests_e2e/test_end2end.py @@ -26,7 +26,7 @@ def _run_cmd(multiline_cmd: str): process = subprocess.Popen(single_line_cmd, shell=True, 
stdout=subprocess.PIPE) with process.stdout: output = "" - for line in iter(process.stdout.readline, b''): + for line in iter(process.stdout.readline, b""): output += line.decode() print(output) process.wait() @@ -57,7 +57,7 @@ def _run_cmd_allow_failure(multiline_cmd: str): process = subprocess.Popen(single_line_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) with process.stdout: output = "" - for line in iter(process.stdout.readline, b''): + for line in iter(process.stdout.readline, b""): output += line.decode() print(output) process.wait() @@ -69,53 +69,58 @@ class TestsEndToEnd: # TestRail 101 instance has the required configuration for this test run TR_INSTANCE = "https://testrail101.testrail.io/" # Uncomment and enter your credentials below in order to execute the tests locally - #os.environ.setdefault("TR_CLI_USERNAME", "") - #os.environ.setdefault("TR_CLI_PASSWORD", "") + # os.environ.setdefault("TR_CLI_USERNAME", "") + # os.environ.setdefault("TR_CLI_PASSWORD", "") @pytest.fixture(autouse=True, scope="module") def install_trcli(self): _run_cmd("cd .. && pip install .") def test_cli_robot_report_RF50(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_robot \\ --title "[CLI-E2E-Tests] ROBOT FRAMEWORK PARSER" \\ -f "reports_robot/simple_report_RF50.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) def test_cli_robot_report_RF70(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_robot \\ --title "[CLI-E2E-Tests] ROBOT FRAMEWORK PARSER" \\ -f "reports_robot/simple_report_RF50.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) def test_cli_plan_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -123,19 +128,21 @@ def test_cli_plan_id(self): --plan-id 1578 \\ --title "[CLI-E2E-Tests] With Plan ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_plan_id_and_config_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -144,19 +151,21 @@ def test_cli_plan_id_and_config_id(self): --config-ids 142,143 \\ --title "[CLI-E2E-Tests] With Plan ID and Config ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_update_run_in_plan(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -164,19 +173,21 @@ def test_cli_update_run_in_plan(self): --run-id 1550 \\ --title "[CLI-E2E-Tests] Update Run in Plan" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) - + def test_cli_update_run_in_plan_with_configs(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -184,38 +195,42 @@ def test_cli_update_run_in_plan_with_configs(self): --run-id 1551 \\ --title "[CLI-E2E-Tests] Update Run in Plan with Configs" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Matcher: AUTO" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto_update_run(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -224,38 +239,42 @@ def test_cli_matchers_auto_update_run(self): --run-id "1568" \\ --milestone-id "107" \\ -f "reports_junit/generic_ids_auto_plus_one.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [GENERIC-IDS-AUTO]", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) def test_cli_matchers_auto_multiple_files(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Matcher: AUTO with multiple files" \\ -f "reports_junit/junit_multiple_parts_*" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [MULTIPART-REPORT-2]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 4 test results in" - ] + "Submitted 4 test results in", + ], ) - + def test_cli_matchers_name(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -n \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -263,7 +282,8 @@ def test_cli_matchers_name(self): --title "[CLI-E2E-Tests] Matcher: NAME" \\ --case-matcher "NAME" \\ -f "reports_junit/generic_ids_name.xml" - """) + """ + ) _assert_contains( output, [ @@ -271,12 +291,13 @@ def test_cli_matchers_name(self): "Found 3 test cases without case ID in the report file.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def test_cli_matchers_property(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -n \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -284,7 +305,8 @@ def test_cli_matchers_property(self): --title "[CLI-E2E-Tests] Matcher: PROPERTY" \\ --case-matcher "PROPERTY" \\ -f "reports_junit/generic_ids_property.xml" - """) + """ + ) _assert_contains( output, [ @@ -292,30 +314,34 @@ def test_cli_matchers_property(self): "Found 3 test cases without case ID in the report file.", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def test_cli_attachments(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Attachments test" \\ -f "reports_junit/attachments.xml" - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in section [ATTACHMENTS]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 4 attachments for 2 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) + def test_cli_multisuite_with_suite_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ @@ -323,7 +349,8 @@ def test_cli_multisuite_with_suite_id(self): --title "[CLI-E2E-Tests] Multisuite with suite id" \\ --suite-id 128 \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -333,12 +360,13 @@ def test_cli_multisuite_with_suite_id(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) def test_cli_multisuite_with_suite_name(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ @@ -346,7 +374,8 @@ def test_cli_multisuite_with_suite_name(self): --suite-name "My suite" \\ --title "[CLI-E2E-Tests] Multisuite without suite id" \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -356,19 +385,21 @@ def test_cli_multisuite_with_suite_name(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) def test_cli_multisuite_without_suite_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests Multisuite" \\ parse_junit \\ --title "[CLI-E2E-Tests] Multisuite without suite id" \\ -f "reports_junit/duplicate-names.xml" - """) + """ + ) _assert_contains( output, [ @@ -378,12 +409,13 @@ def test_cli_multisuite_without_suite_id(self): "Processed 3 test cases in section [DUPLICATES] Basic", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "No attachments found to upload.", - "Submitted 10 test results in" - ] + "Submitted 10 test results in", + ], ) - + def test_cli_saucelabs(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -391,7 +423,8 @@ def test_cli_saucelabs(self): --title "[CLI-E2E-Tests] saucectl parser" \\ --special-parser "saucectl" \\ -f "reports_junit/saucelabs.xml" - """) + """ + ) _assert_contains( output, [ @@ -399,109 +432,114 @@ def test_cli_saucelabs(self): "Processing JUnit suite - Firefox", "Processing JUnit suite - Chrome", "Processed 1 test cases in section [SAUCELABS]", - f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view" - ] + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + ], ) - + def test_cli_openapi(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_openapi \\ -f "openapi_specs/openapi.yml" - """) - _assert_contains( - output, - [ - "Processed 22 test cases based on possible responses.", - "Submitted 22 test cases" - ] + """ ) + _assert_contains(output, ["Processed 22 test cases based on possible responses.", "Submitted 22 test cases"]) def test_cli_add_run(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Create run_config.yml" \\ -f "run_config.yml" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", "title: [CLI-E2E-Tests] ADD RUN TEST: Create run_config.yml", - "Writing test run data to file (run_config.yml). Done." - ] + "Writing test run data to file (run_config.yml). Done.", + ], ) - + def test_cli_add_run_include_all(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run --run-include-all\\ --title "[CLI-E2E-Tests] ADD RUN TEST: Include All Cases" \\ -f "run_config.yml" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", "title: [CLI-E2E-Tests] ADD RUN TEST: Include All Cases", - "Writing test run data to file (run_config.yml). Done." - ] + "Writing test run data to file (run_config.yml). Done.", + ], ) def test_cli_add_run_upload_results(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ -c run_config.yml \\ parse_junit \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ f"Updating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results" - ] + "Submitted 6 test results", + ], ) - + def test_cli_add_run_and_plan_with_due_date(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run --run-include-all \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date" \\ --run-start-date "03/01/2030" --run-end-date "03/12/2030" - """) + """ + ) _assert_contains( output, [ "Creating test run.", f"Test run: {self.TR_INSTANCE}index.php?/runs/view", - "title: [CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date" - ] + "title: [CLI-E2E-Tests] ADD RUN TEST: Test Run with Due Date", + ], ) def test_cli_add_run_refs_with_references(self): """Test creating a run with references""" import random import string - + # Generate random suffix to avoid conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - - output = _run_cmd(f""" + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -509,7 +547,8 @@ def test_cli_add_run_refs_with_references(self): --title "[CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}" \\ --run-refs "JIRA-100,JIRA-200,REQ-{random_suffix}" \\ -f "run_config_refs.yml" - """) + """ + ) _assert_contains( output, [ @@ -517,32 +556,32 @@ def test_cli_add_run_refs_with_references(self): f"Test run: {self.TR_INSTANCE}index.php?/runs/view", f"title: [CLI-E2E-Tests] ADD RUN TEST: With References {random_suffix}", f"Refs: JIRA-100,JIRA-200,REQ-{random_suffix}", - "Writing test run data to file (run_config_refs.yml). Done." - ] + "Writing test run data to file (run_config_refs.yml). Done.", + ], ) def test_cli_add_run_refs_validation_error(self): """Test references validation (too long)""" long_refs = "A" * 251 # Exceeds 250 character limit - - output, return_code = _run_cmd_allow_failure(f""" + + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ add_run \\ --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Too Long" \\ --run-refs "{long_refs}" - """) - - assert return_code != 0 - _assert_contains( - output, - ["Error: References field cannot exceed 250 characters."] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: References field cannot exceed 250 characters."]) + def test_cli_add_run_refs_update_action_validation(self): """Test that update/delete actions require run_id""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -550,12 +589,15 @@ def test_cli_add_run_refs_update_action_validation(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Invalid Action" \\ --run-refs "JIRA-123" \\ --run-refs-action "update" - """) - + """ + ) + assert return_code != 0 _assert_contains( output, - ["Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)."] + [ + "Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required)." 
+ ], ) def test_cli_add_run_refs_update_workflow(self): @@ -563,12 +605,13 @@ def test_cli_add_run_refs_update_workflow(self): import random import string import re - + # Generate random suffix to avoid conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Step 1: Create a run with initial references - create_output = _run_cmd(f""" + create_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -576,24 +619,19 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "JIRA-100,JIRA-200" \\ -f "run_config_workflow.yml" - """) - + """ + ) + # Extract run ID from output - run_id_match = re.search(r'run_id: (\d+)', create_output) + run_id_match = re.search(r"run_id: (\d+)", create_output) assert run_id_match, "Could not extract run ID from output" run_id = run_id_match.group(1) - - _assert_contains( - create_output, - [ - "Creating test run.", - f"run_id: {run_id}", - "Refs: JIRA-100,JIRA-200" - ] - ) - + + _assert_contains(create_output, ["Creating test run.", f"run_id: {run_id}", "Refs: JIRA-100,JIRA-200"]) + # Step 2: Add more references to the existing run - add_output = _run_cmd(f""" + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -602,19 +640,14 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "JIRA-300,REQ-{random_suffix}" \\ --run-refs-action "add" - """) - - _assert_contains( - add_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs Action: add" - ] + """ ) - + + _assert_contains(add_output, ["Updating test run.", f"run_id: {run_id}", "Refs Action: add"]) + # Step 3: Update (replace) all references - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -623,20 +656,16 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "NEW-100,NEW-200" \\ --run-refs-action "update" - """) - + """ + ) + _assert_contains( - update_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs: NEW-100,NEW-200", - "Refs Action: update" - ] + update_output, ["Updating test run.", f"run_id: {run_id}", "Refs: NEW-100,NEW-200", "Refs Action: update"] ) - + # Step 4: Delete specific references - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -645,19 +674,14 @@ def test_cli_add_run_refs_update_workflow(self): --title "[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs "NEW-100" \\ --run-refs-action "delete" - """) - - _assert_contains( - delete_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs Action: delete" - ] + """ ) - + + _assert_contains(delete_output, ["Updating test run.", f"run_id: {run_id}", "Refs Action: delete"]) + # Step 5: Delete all references - delete_all_output = _run_cmd(f""" + delete_all_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -665,21 +689,16 @@ def test_cli_add_run_refs_update_workflow(self): --run-id {run_id} \\ --title 
"[CLI-E2E-Tests] ADD RUN TEST: Refs Workflow {random_suffix}" \\ --run-refs-action "delete" - """) - - _assert_contains( - delete_all_output, - [ - "Updating test run.", - f"run_id: {run_id}", - "Refs: ", - "Refs Action: delete" - ] + """ ) + _assert_contains( + delete_all_output, ["Updating test run.", f"run_id: {run_id}", "Refs: ", "Refs Action: delete"] + ) def bug_test_cli_robot_description_bug(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -687,18 +706,20 @@ def bug_test_cli_robot_description_bug(self): --title "[CLI-E2E-Tests] RUN DESCRIPTION BUG" \\ -f "reports_robot/simple_report_RF50.xml" \\ --run-id 2332 - """) + """ + ) _assert_contains( output, [ "Processed 3 test cases in 2 sections.", "Uploading 1 attachments for 1 test results.", - "Submitted 3 test results in" - ] + "Submitted 3 test results in", + ], ) - + def bug_test_automation_id(self): - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -706,225 +727,221 @@ def bug_test_automation_id(self): parse_junit \\ --title "(DO NOT DELETE) [CLI-E2E-Tests] Test updated Automation ID" \\ -f "reports_junit/generic_ids_auto.xml" - """) + """ + ) _assert_contains( output, [ f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Uploading 1 attachments for 1 test results.", - "Submitted 6 test results" - ] + "Submitted 6 test results", + ], ) def test_labels_full_workflow(self): """Test complete labels workflow: add, list, get, update, delete""" - + # Generate random suffix to avoid conflicts with existing labels import random import string - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) label_title = f"e2e-{random_suffix}" assert len(label_title) <= 20, f"Label title '{label_title}' exceeds 20 characters" - + # Step 1: Add a new label - add_output = _run_cmd(f""" + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label_title}" - """) + """ + ) _assert_contains( - add_output, - [ - f"Adding label '{label_title}'...", - "Successfully added label: ID=", - f"Title='{label_title}'" - ] + add_output, [f"Adding label '{label_title}'...", "Successfully added label: ID=", f"Title='{label_title}'"] ) - + # Extract label ID from the add output import re + label_id_match = re.search(r"ID=(\d+)", add_output) assert label_id_match, f"Could not find label ID in output: {add_output}" label_id = label_id_match.group(1) print(f"Created label with ID: {label_id}") - + # Step 2: List labels to verify it exists - list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) - _assert_contains( - list_output, - [ - "Retrieving labels...", - "Found", - f"ID: {label_id}, Title: '{label_title}'" - ] + """ ) - + _assert_contains(list_output, ["Retrieving labels...", "Found", f"ID: {label_id}, Title: '{label_title}'"]) + # Step 3: Get the specific label - get_output = _run_cmd(f""" + get_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id {label_id} - """) + """ + ) _assert_contains( get_output, - [ - f"Retrieving label with ID {label_id}...", - 
"Label details:", - f"ID: {label_id}", - f"Title: '{label_title}'" - ] + [f"Retrieving label with ID {label_id}...", "Label details:", f"ID: {label_id}", f"Title: '{label_title}'"], ) - + # Step 4: Update the label updated_title = f"upd-{random_suffix}" assert len(updated_title) <= 20, f"Updated title '{updated_title}' exceeds 20 characters" - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels update \\ --id {label_id} \\ --title "{updated_title}" - """) + """ + ) _assert_contains( update_output, [ f"Updating label with ID {label_id}...", f"Successfully updated label: ID={label_id}", - f"Title='{updated_title}'" - ] + f"Title='{updated_title}'", + ], ) - + # Step 5: Verify the update by getting the label again - get_updated_output = _run_cmd(f""" + get_updated_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id {label_id} - """) - _assert_contains( - get_updated_output, - [ - f"ID: {label_id}", - f"Title: '{updated_title}'" - ] + """ ) - + _assert_contains(get_updated_output, [f"ID: {label_id}", f"Title: '{updated_title}'"]) + # Step 6: Delete the label (with confirmation) - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) - _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + """ ) + _assert_contains(delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"]) def test_labels_add_multiple_and_delete_multiple(self): """Test adding multiple labels and deleting them in batch""" - + # Generate random suffix to avoid conflicts with existing labels import random import string - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Add first label label1_title = f"b1-{random_suffix}" assert len(label1_title) <= 20, f"Label1 title '{label1_title}' exceeds 20 characters" - add_output1 = _run_cmd(f""" + add_output1 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label1_title}" - """) - + """ + ) + # Add second label label2_title = f"b2-{random_suffix}" assert len(label2_title) <= 20, f"Label2 title '{label2_title}' exceeds 20 characters" - add_output2 = _run_cmd(f""" + add_output2 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label2_title}" - """) - + """ + ) + # Add third label label3_title = f"b3-{random_suffix}" assert len(label3_title) <= 20, f"Label3 title '{label3_title}' exceeds 20 characters" - add_output3 = _run_cmd(f""" + add_output3 = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{label3_title}" - """) - + """ + ) + # Extract all label IDs import re + label_id1 = re.search(r"ID=(\d+)", add_output1).group(1) label_id2 = re.search(r"ID=(\d+)", add_output2).group(1) label_id3 = re.search(r"ID=(\d+)", add_output3).group(1) - + label_ids = f"{label_id1},{label_id2},{label_id3}" print(f"Created labels with IDs: {label_ids}") - + # Verify all labels exist in list - 
list_output = _run_cmd(f""" + list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) + """ + ) _assert_contains( list_output, [ f"ID: {label_id1}, Title: '{label1_title}'", f"ID: {label_id2}, Title: '{label2_title}'", - f"ID: {label_id3}, Title: '{label3_title}'" - ] + f"ID: {label_id3}, Title: '{label3_title}'", + ], ) - + # Delete all labels in batch - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_ids} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_ids}...", - "Successfully deleted 3 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_ids}...", "Successfully deleted 3 label(s)"] ) def test_labels_help_commands(self): """Test labels help functionality""" - + # Test main labels help - main_help_output = _run_cmd(f""" + main_help_output = _run_cmd( + f""" trcli labels --help - """) + """ + ) _assert_contains( main_help_output, [ @@ -933,27 +950,26 @@ def test_labels_help_commands(self): "delete Delete labels from TestRail", "get Get a specific label by ID", "list List all labels in the project", - "update Update an existing label in TestRail" - ] + "update Update an existing label in TestRail", + ], ) - + # Test add command help - add_help_output = _run_cmd(f""" + add_help_output = _run_cmd( + f""" trcli labels add --help - """) + """ + ) _assert_contains( - add_help_output, - [ - "Add a new label in TestRail", - "--title", - "Title of the label to add (max 20 characters)" - ] + add_help_output, ["Add a new label in TestRail", "--title", "Title of the label to add (max 20 characters)"] ) - + # Test update command help - update_help_output = _run_cmd(f""" + update_help_output = _run_cmd( + f""" trcli labels update --help - """) + """ + ) _assert_contains( update_help_output, [ @@ -961,345 +977,313 @@ def test_labels_help_commands(self): "--id", "--title", "ID of the label to update", - "New title for the label (max 20 characters)" - ] + "New title for the label (max 20 characters)", + ], ) - + # Test delete command help - delete_help_output = _run_cmd(f""" + delete_help_output = _run_cmd( + f""" trcli labels delete --help - """) + """ + ) _assert_contains( - delete_help_output, - [ - "Delete labels from TestRail", - "--ids", - "Comma-separated list of label IDs to delete" - ] + delete_help_output, ["Delete labels from TestRail", "--ids", "Comma-separated list of label IDs to delete"] ) - + # Test list command help - list_help_output = _run_cmd(f""" + list_help_output = _run_cmd( + f""" trcli labels list --help - """) + """ + ) _assert_contains( list_help_output, - [ - "List all labels in the project", - "--offset", - "--limit", - "Offset for pagination", - "Limit for pagination" - ] - ) - + ["List all labels in the project", "--offset", "--limit", "Offset for pagination", "Limit for pagination"], + ) + # Test get command help - get_help_output = _run_cmd(f""" + get_help_output = _run_cmd( + f""" trcli labels get --help - """) - _assert_contains( - get_help_output, - [ - "Get a specific label by ID", - "--id", - "ID of the label to retrieve" - ] + """ ) + _assert_contains(get_help_output, ["Get a specific label by ID", "--id", "ID of the label to retrieve"]) def test_labels_pagination(self): """Test labels pagination functionality""" - + # Test basic list command - list_output = _run_cmd(f""" + 
list_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list - """) - _assert_contains( - list_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) - + _assert_contains(list_output, ["Retrieving labels...", "Found"]) + # Test pagination with limit - paginated_output = _run_cmd(f""" + paginated_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list \\ --limit 5 - """) - _assert_contains( - paginated_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) - + _assert_contains(paginated_output, ["Retrieving labels...", "Found"]) + # Test pagination with offset and limit - offset_output = _run_cmd(f""" + offset_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels list \\ --offset 0 \\ --limit 10 - """) - _assert_contains( - offset_output, - [ - "Retrieving labels...", - "Found" - ] + """ ) + _assert_contains(offset_output, ["Retrieving labels...", "Found"]) def test_labels_validation_errors(self): """Test labels validation and error handling""" - + # Test title too long (more than 20 characters) - long_title_output, returncode = _run_cmd_allow_failure(f""" + long_title_output, returncode = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "ThisTitleIsWayTooLongForTheValidationLimit" - """) + """ + ) # Should fail with validation error assert returncode != 0, f"Expected validation error but command succeeded: {long_title_output}" assert "Error: Label title must be 20 characters or less." in long_title_output - + # Test invalid label ID for get - invalid_get_output, returncode = _run_cmd_allow_failure(f""" + invalid_get_output, returncode = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels get \\ --id 999999 - """) + """ + ) # Should fail with API error assert returncode != 0, f"Expected API error but command succeeded: {invalid_get_output}" assert "Failed to retrieve label:" in invalid_get_output - + # Test invalid label ID format for delete - invalid_delete_output, returncode = _run_cmd_allow_failure(f""" + invalid_delete_output, returncode = _run_cmd_allow_failure( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids "abc,def" - """) + """ + ) # Should fail with format validation error assert returncode != 0, f"Expected validation error but command succeeded: {invalid_delete_output}" assert "Error: Invalid label IDs format" in invalid_delete_output def test_labels_edge_cases(self): """Test labels edge cases and boundary conditions""" - + # Test with exactly 20 character title (boundary condition) twenty_char_title = "ExactlyTwentyCharss!" 
# Exactly 20 characters assert len(twenty_char_title) == 20, "Test title should be exactly 20 characters" - - add_output = _run_cmd(f""" + + add_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{twenty_char_title}" - """) - _assert_contains( - add_output, - [ - f"Adding label '{twenty_char_title}'...", - "Successfully added label:" - ] + """ ) - + _assert_contains(add_output, [f"Adding label '{twenty_char_title}'...", "Successfully added label:"]) + # Extract label ID for cleanup import re + label_id_match = re.search(r"ID=(\d+)", add_output) if label_id_match: label_id = label_id_match.group(1) - + # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"] ) - def test_labels_cases_full_workflow(self): """Test complete workflow of test case label operations""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) case_label_title = f"e2e-case-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{case_label_title}" - """) - _assert_contains( - add_label_output, - [ - f"Adding label '{case_label_title}'...", - "Successfully added label:" - ] + """ ) - + _assert_contains(add_label_output, [f"Adding label '{case_label_title}'...", "Successfully added label:"]) + # Extract label ID for later use import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) - + try: # Use known test case IDs that should exist in the test project test_case_ids = ["24964", "24965"] # Multiple test cases for batch testing - + # Add labels to test cases - add_cases_output = _run_cmd(f""" + add_cases_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{','.join(test_case_ids)}" \\ --title "{case_label_title}" - """) + """ + ) _assert_contains( add_cases_output, [ f"Adding label '{case_label_title}' to {len(test_case_ids)} test case(s)...", - "Successfully processed" - ] + "Successfully processed", + ], ) - + # List test cases by label title - list_by_title_output = _run_cmd(f""" + list_by_title_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "{case_label_title}" - """) + """ + ) _assert_contains( list_by_title_output, - [ - f"Retrieving test cases with label title '{case_label_title}'...", - "matching test case(s):" - ] + [f"Retrieving test cases with label title '{case_label_title}'...", "matching test case(s):"], ) - + # List test cases by label ID - list_by_id_output = _run_cmd(f""" + list_by_id_output = _run_cmd( + f""" trcli -y \\ -h 
{self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --ids "{label_id}" - """) + """ + ) _assert_contains( - list_by_id_output, - [ - f"Retrieving test cases with label IDs: {label_id}...", - "matching test case(s):" - ] + list_by_id_output, [f"Retrieving test cases with label IDs: {label_id}...", "matching test case(s):"] ) - + finally: # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) _assert_contains( - delete_output, - [ - f"Deleting labels with IDs: {label_id}...", - "Successfully deleted 1 label(s)" - ] + delete_output, [f"Deleting labels with IDs: {label_id}...", "Successfully deleted 1 label(s)"] ) def test_labels_cases_validation_errors(self): """Test validation errors for test case label commands""" # Test title too long for add cases - long_title_output, return_code = _run_cmd_allow_failure(f""" + long_title_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "1" \\ --title "this-title-is-way-too-long-for-testrail" - """) - assert return_code != 0 - _assert_contains( - long_title_output, - ["Error: Label title must be 20 characters or less."] + """ ) - + assert return_code != 0 + _assert_contains(long_title_output, ["Error: Label title must be 20 characters or less."]) + # Test invalid case IDs format - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "invalid,ids" \\ --title "test" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid case IDs format. 
Use comma-separated integers (e.g., 1,2,3)."] ) - + # Test missing filter for list cases - no_filter_output, return_code = _run_cmd_allow_failure(f""" + no_filter_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list - """) - assert return_code != 0 - _assert_contains( - no_filter_output, - ["Error: Either --ids or --title must be provided."] + """ ) - + assert return_code != 0 + _assert_contains(no_filter_output, ["Error: Either --ids or --title must be provided."]) + # Test title too long for list cases - long_title_list_output, return_code = _run_cmd_allow_failure(f""" + long_title_list_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "this-title-is-way-too-long-for-testrail" - """) - assert return_code != 0 - _assert_contains( - long_title_list_output, - ["Error: Label title must be 20 characters or less."] + """ ) + assert return_code != 0 + _assert_contains(long_title_list_output, ["Error: Label title must be 20 characters or less."]) def test_labels_cases_help_commands(self): """Test help output for test case label commands""" @@ -1311,22 +1295,17 @@ def test_labels_cases_help_commands(self): "Usage: trcli labels cases [OPTIONS] COMMAND [ARGS]...", "Manage labels for test cases", "add Add a label to test cases", - "list List test cases filtered by label ID or title" - ] + "list List test cases filtered by label ID or title", + ], ) - + # Test cases add help cases_add_help_output = _run_cmd("trcli labels cases add --help") _assert_contains( cases_add_help_output, - [ - "Usage: trcli labels cases add [OPTIONS]", - "Add a label to test cases", - "--case-ids", - "--title" - ] + ["Usage: trcli labels cases add [OPTIONS]", "Add a label to test cases", "--case-ids", "--title"], ) - + # Test cases list help cases_list_help_output = _run_cmd("trcli labels cases list --help") _assert_contains( @@ -1335,73 +1314,76 @@ def test_labels_cases_help_commands(self): "Usage: trcli labels cases list [OPTIONS]", "List test cases filtered by label ID or title", "--ids", - "--title" - ] + "--title", + ], ) def test_labels_cases_no_matching_cases(self): """Test behavior when no test cases match the specified label""" # Test with non-existent label title - no_match_output = _run_cmd(f""" + no_match_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "non-existent-label" - """) + """ + ) _assert_contains( no_match_output, [ "Retrieving test cases with label title 'non-existent-label'...", "Found 0 matching test case(s):", - "No test cases found with label title 'non-existent-label'." - ] + "No test cases found with label title 'non-existent-label'.", + ], ) - + # Test with non-existent label ID - no_match_id_output = _run_cmd(f""" + no_match_id_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --ids "99999" - """) + """ + ) _assert_contains( no_match_id_output, [ "Retrieving test cases with label IDs: 99999...", "Found 0 matching test case(s):", - "No test cases found with the specified label IDs." 
- ] + "No test cases found with the specified label IDs.", + ], ) def test_labels_cases_single_case_workflow(self): """Test single case label operations using update_case endpoint""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) single_case_label_title = f"e2e-single-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( - add_label_output, - [ - f"Adding label '{single_case_label_title}'...", - "Successfully added label:" - ] + add_label_output, [f"Adding label '{single_case_label_title}'...", "Successfully added label:"] ) # Extract label ID for later use import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) @@ -1411,77 +1393,80 @@ def test_labels_cases_single_case_workflow(self): single_case_id = "24964" # Add label to single test case - add_single_case_output = _run_cmd(f""" + add_single_case_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases add \\ --case-ids "{single_case_id}" \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( add_single_case_output, [ f"Adding label '{single_case_label_title}' to 1 test case(s)...", "Successfully processed 1 case(s):", - f"Successfully added label '{single_case_label_title}' to case {single_case_id}" - ] + f"Successfully added label '{single_case_label_title}' to case {single_case_id}", + ], ) # Verify the label was added by listing cases with this label - list_cases_output = _run_cmd(f""" + list_cases_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels cases list \\ --title "{single_case_label_title}" - """) + """ + ) _assert_contains( list_cases_output, [ f"Retrieving test cases with label title '{single_case_label_title}'...", "Found 1 matching test case(s):", - f"Case ID: {single_case_id}" - ] + f"Case ID: {single_case_id}", + ], ) finally: # Clean up: delete the test label - _run_cmd(f""" + _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) def test_labels_tests_full_workflow(self): """Test complete workflow of test label operations""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) test_label_title = f"e2e-test-{random_suffix}" - + # First, create a test label - add_label_output = _run_cmd(f""" + add_label_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels add \\ --title "{test_label_title}" - """) - _assert_contains( - add_label_output, - [ - f"Adding label '{test_label_title}'...", - "Successfully added label:" - ] + """ ) + _assert_contains(add_label_output, [f"Adding label '{test_label_title}'...", "Successfully added label:"]) 
# Extract label ID for cleanup import re + label_id_match = re.search(r"ID=(\d+)", add_label_output) assert label_id_match, "Could not extract label ID from output" label_id = label_id_match.group(1) @@ -1491,126 +1476,122 @@ def test_labels_tests_full_workflow(self): test_ids = ["266149", "266151"] # Real test IDs for functional testing # Test 1: Add labels to tests using --test-ids - add_tests_output = _run_cmd(f""" + add_tests_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-ids "{','.join(test_ids)}" \\ --title "{test_label_title}" - """) - - _assert_contains( - add_tests_output, - [ - f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..." - ] + """ ) + _assert_contains(add_tests_output, [f"Adding label '{test_label_title}' to {len(test_ids)} test(s)..."]) + # Test 2: Add labels to tests using CSV file import os + csv_file_path = os.path.join(os.path.dirname(__file__), "sample_csv", "test_ids.csv") - - add_tests_csv_output = _run_cmd(f""" + + add_tests_csv_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-id-file "{csv_file_path}" \\ --title "{test_label_title}" - """) - + """ + ) + _assert_contains( add_tests_csv_output, - [ - "Loaded 2 test ID(s) from file", - f"Adding label '{test_label_title}' to 2 test(s)..." - ] + ["Loaded 2 test ID(s) from file", f"Adding label '{test_label_title}' to 2 test(s)..."], ) # Test 3: Get test labels for specific tests - get_test_labels_output = _run_cmd(f""" + get_test_labels_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests get \\ --test-ids "{','.join(test_ids)}" - """) + """ + ) _assert_contains( - get_test_labels_output, - [ - f"Retrieving labels for {len(test_ids)} test(s)...", - "Test label information:" - ] + get_test_labels_output, [f"Retrieving labels for {len(test_ids)} test(s)...", "Test label information:"] ) finally: # Cleanup - delete the test label - delete_output = _run_cmd(f""" + delete_output = _run_cmd( + f""" echo "y" | trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels delete \\ --ids {label_id} - """) + """ + ) def test_labels_tests_validation_errors(self): """Test validation errors for test label commands""" import random import string - + # Generate random suffix to avoid label conflicts - random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)) - + random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) + # Test title too long (21 characters exceeds 20 character limit) long_title = f"this-is-a-very-long-title-{random_suffix}" # This will be > 20 chars - title_error_output, return_code = _run_cmd_allow_failure(f""" + title_error_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --test-ids "266149" \\ --title "{long_title}" - """) - assert return_code != 0 - _assert_contains( - title_error_output, - ["exceeds 20 character limit and will be skipped."] + """ ) + assert return_code != 0 + _assert_contains(title_error_output, ["exceeds 20 character limit and will be skipped."]) # Test missing test-ids and file valid_title = f"test-{random_suffix}"[:20] # Ensure valid length - missing_ids_output, return_code = _run_cmd_allow_failure(f""" + 
missing_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests add \\ --title "{valid_title}" - """) - assert return_code != 0 - _assert_contains( - missing_ids_output, - ["Error: Either --test-ids or --test-id-file must be provided."] + """ ) + assert return_code != 0 + _assert_contains(missing_ids_output, ["Error: Either --test-ids or --test-id-file must be provided."]) # Test invalid label IDs format in list command - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ labels tests list \\ --run-id "1" \\ --ids "invalid,ids" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid label IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) def test_labels_tests_help_commands(self): """Test help output for test label commands""" - + # Test main tests help tests_help_output = _run_cmd("trcli labels tests --help") _assert_contains( @@ -1620,9 +1601,9 @@ def test_labels_tests_help_commands(self): "Manage labels for tests", "Commands:", "add", - "list", - "get" - ] + "list", + "get", + ], ) # Test tests add help @@ -1634,8 +1615,8 @@ def test_labels_tests_help_commands(self): "Add label(s) to tests", "--test-ids", "--test-id-file", - "--title" - ] + "--title", + ], ) # Test tests list help @@ -1646,35 +1627,27 @@ def test_labels_tests_help_commands(self): "Usage: trcli labels tests list [OPTIONS]", "List tests filtered by label ID from specific runs", "--run-id", - "--ids" - ] + "--ids", + ], ) # Test tests get help tests_get_help_output = _run_cmd("trcli labels tests get --help") _assert_contains( tests_get_help_output, - [ - "Usage: trcli labels tests get [OPTIONS]", - "Get the labels of tests using test IDs", - "--test-id" - ] + ["Usage: trcli labels tests get [OPTIONS]", "Get the labels of tests using test IDs", "--test-id"], ) def test_references_cases_help_commands(self): """Test references cases help commands""" - + # Test main references help references_help_output = _run_cmd("trcli references --help") _assert_contains( references_help_output, - [ - "Usage: trcli references [OPTIONS] COMMAND [ARGS]...", - "Manage references in TestRail", - "cases" - ] + ["Usage: trcli references [OPTIONS] COMMAND [ARGS]...", "Manage references in TestRail", "cases"], ) - + # Test references cases help cases_help_output = _run_cmd("trcli references cases --help") _assert_contains( @@ -1683,23 +1656,18 @@ def test_references_cases_help_commands(self): "Usage: trcli references cases [OPTIONS] COMMAND [ARGS]...", "Manage references for test cases", "add", - "update", - "delete" - ] + "update", + "delete", + ], ) - + # Test references cases add help add_help_output = _run_cmd("trcli references cases add --help") _assert_contains( add_help_output, - [ - "Usage: trcli references cases add [OPTIONS]", - "Add references to test cases", - "--case-ids", - "--refs" - ] + ["Usage: trcli references cases add [OPTIONS]", "Add references to test cases", "--case-ids", "--refs"], ) - + # Test references cases update help update_help_output = _run_cmd("trcli references cases update --help") _assert_contains( @@ -1708,10 +1676,10 @@ def test_references_cases_help_commands(self): "Usage: trcli references 
cases update [OPTIONS]", "Update references on test cases by replacing existing ones", "--case-ids", - "--refs" - ] + "--refs", + ], ) - + # Test references cases delete help delete_help_output = _run_cmd("trcli references cases delete --help") _assert_contains( @@ -1720,66 +1688,66 @@ def test_references_cases_help_commands(self): "Usage: trcli references cases delete [OPTIONS]", "Delete all or specific references from test cases", "--case-ids", - "--refs" - ] + "--refs", + ], ) def test_references_cases_error_scenarios(self): """Test references cases error scenarios""" - + # Test invalid test case IDs format - invalid_ids_output, return_code = _run_cmd_allow_failure(f""" + invalid_ids_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "invalid,ids" \\ --refs "REQ-1" - """) + """ + ) assert return_code != 0 _assert_contains( - invalid_ids_output, - ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] + invalid_ids_output, ["Error: Invalid test case IDs format. Use comma-separated integers (e.g., 1,2,3)."] ) - + # Test empty references - empty_refs_output, return_code = _run_cmd_allow_failure(f""" + empty_refs_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "321" \\ --refs ",,," - """) - assert return_code != 0 - _assert_contains( - empty_refs_output, - ["Error: No valid references provided."] + """ ) - + assert return_code != 0 + _assert_contains(empty_refs_output, ["Error: No valid references provided."]) + # Test references too long (over 2000 characters) - long_refs = ','.join([f'REQ-{i}' * 100 for i in range(10)]) # Create very long references - long_refs_output, return_code = _run_cmd_allow_failure(f""" + long_refs = ",".join([f"REQ-{i}" * 100 for i in range(10)]) # Create very long references + long_refs_output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ references cases add \\ --case-ids "321" \\ --refs "{long_refs}" - """) - assert return_code != 0 - _assert_contains( - long_refs_output, - ["exceeds 2000 character limit"] + """ ) + assert return_code != 0 + _assert_contains(long_refs_output, ["exceeds 2000 character limit"]) # ==================== ASSIGN FEATURE TESTS ==================== - + def test_assign_failures_single_user(self): """Test --assign feature with single user""" # Note: This test assumes a valid TestRail user exists in the instance # In a real environment, you would use actual user emails - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1787,7 +1755,8 @@ def test_assign_failures_single_user(self): --title "[CLI-E2E-Tests] Assign Failures - Single User" \\ --assign "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1795,13 +1764,14 @@ def test_assign_failures_single_user(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." 
- ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_multiple_users(self): """Test --assign feature with multiple users (round-robin assignment)""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1809,7 +1779,8 @@ def test_assign_failures_multiple_users(self): --title "[CLI-E2E-Tests] Assign Failures - Multiple Users" \\ --assign "trcli@gurock.io,trcli@testrail.com" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1817,13 +1788,14 @@ def test_assign_failures_multiple_users(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_short_form(self): """Test --assign feature using -a short form""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1831,7 +1803,8 @@ def test_assign_failures_short_form(self): --title "[CLI-E2E-Tests] Assign Failures - Short Form" \\ -a "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1839,35 +1812,38 @@ def test_assign_failures_short_form(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_without_assign_option(self): """Test that normal operation works without --assign option""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] No Assign Option" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ "Auto-assign failures: No", "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. 
Test run: {self.TR_INSTANCE}index.php?/runs/view", - "Submitted 6 test results in" - ] + "Submitted 6 test results in", + ], ) # Should NOT contain assignment message assert "Assigning failed results:" not in output def test_assign_failures_invalid_user(self): """Test --assign feature with invalid user email""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1875,19 +1851,16 @@ def test_assign_failures_invalid_user(self): --title "[CLI-E2E-Tests] Assign Failures - Invalid User" \\ --assign "invalid.user@nonexistent.com" \\ -f "reports_junit/assign_test_failures.xml" - """) - - assert return_code != 0 - _assert_contains( - output, - [ - "Error: User not found: invalid.user@nonexistent.com" - ] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: User not found: invalid.user@nonexistent.com"]) + def test_assign_failures_mixed_valid_invalid_users(self): """Test --assign feature with mix of valid and invalid users""" - output, return_code = _run_cmd_allow_failure(f""" + output, return_code = _run_cmd_allow_failure( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1895,19 +1868,16 @@ def test_assign_failures_mixed_valid_invalid_users(self): --title "[CLI-E2E-Tests] Assign Failures - Mixed Users" \\ --assign "trcli@gurock.io,invalid.user@nonexistent.com" \\ -f "reports_junit/assign_test_failures.xml" - """) - - assert return_code != 0 - _assert_contains( - output, - [ - "Error: User not found: invalid.user@nonexistent.com" - ] + """ ) + assert return_code != 0 + _assert_contains(output, ["Error: User not found: invalid.user@nonexistent.com"]) + def test_assign_failures_whitespace_handling(self): """Test --assign feature with whitespace in email list""" - output = _run_cmd(f""" + output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1915,7 +1885,8 @@ def test_assign_failures_whitespace_handling(self): --title "[CLI-E2E-Tests] Assign Failures - Whitespace" \\ --assign " trcli@gurock.io , trcli@testrail.com " \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( output, [ @@ -1923,42 +1894,41 @@ def test_assign_failures_whitespace_handling(self): "Processed 6 test cases in section [ASSIGNTESTSUITE]", f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], ) def test_assign_failures_help_documentation(self): """Test that --assign option appears in help documentation""" help_output = _run_cmd("trcli parse_junit --help") _assert_contains( - help_output, - [ - "-a, --assign", - "Comma-separated list of user emails to assign failed", - "test results to." 
- ] + help_output, ["-a, --assign", "Comma-separated list of user emails to assign", "failed test results to."] ) def test_assign_failures_with_existing_run(self): """Test --assign feature when updating an existing run""" # First create a run - create_output = _run_cmd(f""" + create_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ parse_junit \\ --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ -f "reports_junit/generic_ids_auto.xml" - """) - + """ + ) + # Extract run ID from output import re - run_id_match = re.search(r'runs/view/(\d+)', create_output) + + run_id_match = re.search(r"runs/view/(\d+)", create_output) assert run_id_match, "Could not extract run ID from output" run_id = run_id_match.group(1) - + # Update the run with failed tests and assignment - update_output = _run_cmd(f""" + update_output = _run_cmd( + f""" trcli -y \\ -h {self.TR_INSTANCE} \\ --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ @@ -1967,14 +1937,345 @@ def test_assign_failures_with_existing_run(self): --title "[CLI-E2E-Tests] Assign Failures - Update Run" \\ --assign "trcli@gurock.io" \\ -f "reports_junit/assign_test_failures.xml" - """) + """ + ) _assert_contains( update_output, [ "Auto-assign failures: Yes (trcli@gurock.io)", f"Updating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view/{run_id}", "Submitted 6 test results in", - "Assigning failed results: 4/4, Done." - ] + "Assigning failed results: 4/4, Done.", + ], + ) + + # ==================== BDD/GHERKIN FEATURE TESTS ==================== + + def test_parse_gherkin_local_parsing(self): + """Test parse_gherkin command for local .feature file parsing (no TestRail upload)""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" + """ + ) + _assert_contains( + output, + [ + "Parsing Gherkin feature file:", + "sample_login.feature", + '"suites"', + '"summary"', + '"total_suites"', + '"total_cases"', + ], + ) + + def test_parse_gherkin_with_output_file(self): + """Test parse_gherkin command with output file option""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" \\ + --output "parsed_gherkin.json" + """ + ) + _assert_contains( + output, + ["Parsing Gherkin feature file:", "sample_login.feature", "Parsed results saved to", "parsed_gherkin.json"], + ) + + def test_parse_gherkin_pretty_format(self): + """Test parse_gherkin command with pretty print formatting""" + output = _run_cmd( + f""" +trcli parse_gherkin \\ + -f "reports_gherkin/sample_login.feature" \\ + --pretty + """ + ) + _assert_contains(output, ["Parsing Gherkin feature file:", "sample_login.feature", '"suites"', '"summary"']) + + def test_import_gherkin_upload_feature(self): + """Test import_gherkin command to upload .feature file to TestRail""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 + """ + ) + _assert_contains( + output, + [ + "Connecting to TestRail...", + "Uploading feature file to TestRail...", + "Successfully uploaded feature file!", + "Created/updated", + "test case(s)", + "Case IDs:", + ], + ) + + def test_import_gherkin_with_json_output(self): + """Test import_gherkin command with JSON output format""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f 
"reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --json-output + """ + ) + _assert_contains( + output, ["Connecting to TestRail...", "Uploading feature file to TestRail...", '"case_ids"', '"count"'] ) - \ No newline at end of file + + def test_import_gherkin_with_verbose(self): + """Test import_gherkin command with verbose logging""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + -v \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 + """ + ) + _assert_contains( + output, + [ + "Connecting to TestRail...", + "Uploading feature file to TestRail...", + "Successfully uploaded feature file!", + ], + ) + + def test_export_gherkin_download_to_stdout(self): + """Test export_gherkin command to download BDD test case to stdout""" + # First, import a feature file to ensure we have a case to export + import_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --json-output + """ + ) + + # Extract case ID from JSON output + import re + import json + + json_start = import_output.find("{") + if json_start >= 0: + json_str = import_output[json_start:] + # Remove "DONE" and any trailing text after the JSON + json_end = json_str.find("}") + if json_end >= 0: + json_str = json_str[: json_end + 1] + output_data = json.loads(json_str) + case_id = output_data.get("case_ids", [])[0] if output_data.get("case_ids") else None + + if case_id: + # Now export the case + export_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + export_gherkin \\ + --case-id {case_id} + """ + ) + _assert_contains(export_output, ["Connecting to TestRail...", "Retrieving BDD test case", "Feature:"]) + + def test_export_gherkin_download_to_file(self): + """Test export_gherkin command to download BDD test case to file""" + # First, import a feature file to ensure we have a case to export + import_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "reports_gherkin/sample_bdd.feature" \\ + --section-id 2388 \\ + --json-output + """ + ) + + # Extract case ID from JSON output + import re + import json + + json_start = import_output.find("{") + if json_start >= 0: + json_str = import_output[json_start:] + # Remove "DONE" and any trailing text after the JSON + json_end = json_str.find("}") + if json_end >= 0: + json_str = json_str[: json_end + 1] + output_data = json.loads(json_str) + case_id = output_data.get("case_ids", [])[0] if output_data.get("case_ids") else None + + if case_id: + # Now export the case to a file + export_output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + export_gherkin \\ + --case-id {case_id} \\ + --output "exported_bdd.feature" + """ + ) + _assert_contains( + export_output, + [ + "Connecting to TestRail...", + "Retrieving BDD test case", + "Successfully exported test case", + "exported_bdd.feature", + ], + ) + + def test_parse_cucumber_workflow1_results_only(self): + """Test parse_cucumber Workflow 1: Parse and upload results only (no feature upload)""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_cucumber \\ + --title 
"[CLI-E2E-Tests] Cucumber Parser - Results Only" \\ + --suite-id 86 \\ + -f "reports_cucumber/sample_cucumber.json" + """ + ) + _assert_contains( + output, + [ + "Parsing Cucumber", + "Processed", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted", + "Results uploaded successfully", + ], + ) + + def test_parse_cucumber_workflow2_with_feature_upload(self): + """Test parse_cucumber Workflow 2: Generate feature, upload, then upload results""" + output = _run_cmd( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + parse_cucumber \\ + --title "[CLI-E2E-Tests] Cucumber Parser - With Feature Upload" \\ + --suite-id 86 \\ + --upload-feature \\ + --feature-section-id 2388 \\ + -f "reports_cucumber/sample_cucumber.json" + """ + ) + _assert_contains( + output, + [ + "Creating BDD test cases from features...", + "Successfully created", + "BDD test case", + "Proceeding to upload test results...", + f"Creating test run. Test run: {self.TR_INSTANCE}index.php?/runs/view", + "Submitted", + "Results uploaded successfully", + ], + ) + + def test_bdd_help_commands(self): + """Test that all BDD commands appear in help documentation""" + + # Test main CLI help shows BDD commands + main_help_output = _run_cmd("trcli --help") + _assert_contains(main_help_output, ["parse_gherkin", "import_gherkin", "export_gherkin", "parse_cucumber"]) + + # Test parse_gherkin help + parse_gherkin_help = _run_cmd("trcli parse_gherkin --help") + _assert_contains( + parse_gherkin_help, + [ + "Parse Gherkin .feature file locally", + "-f, --file", + "--output", + "--pretty", + ], + ) + + # Test import_gherkin help + import_gherkin_help = _run_cmd("trcli import_gherkin --help") + _assert_contains( + import_gherkin_help, + [ + "Upload Gherkin .feature file to TestRail", + "-f, --file", + "--section-id", + "--json-output", + ], + ) + + # Test export_gherkin help + export_gherkin_help = _run_cmd("trcli export_gherkin --help") + _assert_contains( + export_gherkin_help, + ["Export BDD test case from TestRail as .feature file", "--case-id", "--output"], + ) + + # Test parse_cucumber help + parse_cucumber_help = _run_cmd("trcli parse_cucumber --help") + _assert_contains( + parse_cucumber_help, + [ + "Parse Cucumber JSON results and upload to TestRail", + "--upload-feature", + "--feature-section-id", + "--title", + "--suite-id", + ], + ) + + def test_bdd_error_handling_invalid_file(self): + """Test BDD commands with invalid file paths""" + + # Test parse_gherkin with non-existent file + invalid_parse_output, return_code = _run_cmd_allow_failure( + """ +trcli parse_gherkin \\ + -f "nonexistent.feature" + """ + ) + assert return_code != 0 + + # Test import_gherkin with non-existent file + invalid_import_output, return_code = _run_cmd_allow_failure( + f""" +trcli -y \\ + -h {self.TR_INSTANCE} \\ + --project "SA - (DO NOT DELETE) TRCLI-E2E-Tests" \\ + import_gherkin \\ + -f "nonexistent.feature" \\ + --section-id 2388 + """ + ) + assert return_code != 0 diff --git a/trcli/api/api_cache.py b/trcli/api/api_cache.py new file mode 100644 index 0000000..9ab6741 --- /dev/null +++ b/trcli/api/api_cache.py @@ -0,0 +1,205 @@ +""" +API Response Cache Module + +This module provides a session-scoped caching mechanism for API responses +to reduce redundant API calls and improve performance. 
+
+The cache is designed to be:
+- Thread-safe
+- Session-scoped (per ApiRequestHandler instance)
+- Backwards compatible (transparent to existing code)
+- Memory-efficient (bounded size; oldest entries are evicted first)
+"""
+
+from functools import lru_cache
+from typing import Any, Tuple, Optional, Callable
+from threading import Lock
+from beartype.typing import List, Dict
+
+
+class RequestCache:
+    """
+    Session-scoped cache for API responses.
+
+    This cache stores API responses during a single command execution session
+    to avoid redundant API calls. Each ApiRequestHandler instance should have
+    its own cache instance.
+
+    Key features:
+    - Automatic cache key generation from endpoint and parameters
+    - Oldest-entry (FIFO) eviction to prevent unbounded memory growth
+    - Thread-safe operations
+    - Simple invalidation mechanism
+    """
+
+    def __init__(self, max_size: int = 512):
+        """
+        Initialize the request cache.
+
+        Args:
+            max_size: Maximum number of cached responses (default: 512)
+        """
+        self.max_size = max_size
+        self._cache: Dict[str, Any] = {}
+        self._lock = Lock()
+        self._hit_count = 0
+        self._miss_count = 0
+
+    def _make_cache_key(self, endpoint: str, params: Optional[Tuple] = None) -> str:
+        """
+        Generate a unique cache key from endpoint and parameters.
+
+        Args:
+            endpoint: API endpoint (e.g., "get_cases/123")
+            params: Optional tuple of parameters
+
+        Returns:
+            String cache key
+        """
+        if params is None:
+            return endpoint
+
+        # Convert params to a sorted tuple to ensure consistent keys
+        if isinstance(params, dict):
+            params_tuple = tuple(sorted(params.items()))
+        elif isinstance(params, (list, tuple)):
+            params_tuple = tuple(params)
+        else:
+            params_tuple = (params,)
+
+        return f"{endpoint}::{params_tuple}"
+
+    def get(self, endpoint: str, params: Optional[Tuple] = None) -> Optional[Any]:
+        """
+        Retrieve a cached response.
+
+        Args:
+            endpoint: API endpoint
+            params: Optional parameters
+
+        Returns:
+            Cached response or None if not found
+        """
+        cache_key = self._make_cache_key(endpoint, params)
+
+        with self._lock:
+            if cache_key in self._cache:
+                self._hit_count += 1
+                return self._cache[cache_key]
+            else:
+                self._miss_count += 1
+                return None
+
+    def set(self, endpoint: str, response: Any, params: Optional[Tuple] = None) -> None:
+        """
+        Store a response in the cache.
+
+        Args:
+            endpoint: API endpoint
+            response: Response to cache
+            params: Optional parameters
+        """
+        cache_key = self._make_cache_key(endpoint, params)
+
+        with self._lock:
+            # Simple FIFO eviction: if the cache is full, remove the oldest entry
+            if len(self._cache) >= self.max_size:
+                # Remove the first (oldest) item
+                first_key = next(iter(self._cache))
+                del self._cache[first_key]
+
+            self._cache[cache_key] = response
+
+    def invalidate(self, endpoint: Optional[str] = None, params: Optional[Tuple] = None) -> None:
+        """
+        Invalidate cache entries.
+
+        Args:
+            endpoint: If provided, invalidate only this endpoint.
+                     If None, clear entire cache.
+            params: Optional parameters to narrow invalidation
+        """
+        with self._lock:
+            if endpoint is None:
+                # Clear entire cache
+                self._cache.clear()
+            else:
+                cache_key = self._make_cache_key(endpoint, params)
+                if cache_key in self._cache:
+                    del self._cache[cache_key]
+
+    def invalidate_pattern(self, pattern: str) -> None:
+        """
+        Invalidate all cache entries matching a pattern.
+ + Args: + pattern: String pattern to match against cache keys + """ + with self._lock: + keys_to_delete = [key for key in self._cache if pattern in key] + for key in keys_to_delete: + del self._cache[key] + + def get_or_fetch( + self, + endpoint: str, + fetch_func: Callable[[], Tuple[Any, str]], + params: Optional[Tuple] = None, + force_refresh: bool = False, + ) -> Tuple[Any, str]: + """ + Get cached response or fetch if not cached. + + This is the main method for integrating caching into existing code. + It transparently handles cache hits/misses and maintains the same + return signature as the original fetch functions. + + Args: + endpoint: API endpoint + fetch_func: Function to call if cache miss (should return (data, error)) + params: Optional parameters for cache key + force_refresh: If True, bypass cache and fetch fresh data + + Returns: + Tuple of (data, error_message) matching API call signature + """ + if not force_refresh: + cached = self.get(endpoint, params) + if cached is not None: + # Return cached result + return cached + + # Cache miss or force refresh - fetch fresh data + result = fetch_func() + + # Only cache successful responses (no error) + data, error = result + if not error: + self.set(endpoint, result, params) + + return result + + def get_stats(self) -> Dict[str, int]: + """ + Get cache statistics. + + Returns: + Dictionary with hit_count, miss_count, size, and hit_rate + """ + with self._lock: + total = self._hit_count + self._miss_count + hit_rate = (self._hit_count / total * 100) if total > 0 else 0.0 + + return { + "hit_count": self._hit_count, + "miss_count": self._miss_count, + "size": len(self._cache), + "hit_rate": hit_rate, + } + + def clear(self) -> None: + """Clear all cached data and reset statistics.""" + with self._lock: + self._cache.clear() + self._hit_count = 0 + self._miss_count = 0 diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index a412754..1742b38 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -39,7 +39,7 @@ class APIClient: PREFIX = "index.php?" VERSION = "/api/v2/" SUFFIX_API_V2_VERSION = f"{PREFIX}{VERSION}" - RETRY_ON = [429, 500, 502] + RETRY_ON = [429, 500, 502, 503, 504] # Added 503 Service Unavailable and 504 Gateway Timeout USER_AGENT = "TRCLI" def __init__( @@ -176,6 +176,12 @@ def __send_request( if status_code == 429: retry_time = float(response.headers["Retry-After"]) sleep(retry_time) + elif status_code in [500, 502, 503, 504] and i < self.retries: + backoff_time = min(2**i, 30) # Exponential backoff capped at 30 seconds + self.logging_function( + f"Server error {status_code}, retrying in {backoff_time}s (attempt {i+1}/{self.retries})..." 
+ ) + sleep(backoff_time) try: # workaround for buggy legacy TR server version response if response.content.startswith(b"USER AUTHENTICATION SUCCESSFUL!\n"): diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 3dcd196..6f8f6fb 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,18 +1,36 @@ -import html, json +import os +import time from concurrent.futures import ThreadPoolExecutor, as_completed from beartype.typing import List, Union, Tuple, Dict from trcli.api.api_client import APIClient, APIClientResult from trcli.api.api_response_verify import ApiResponseVerify +from trcli.api.api_cache import RequestCache +from trcli.api.label_manager import LabelManager +from trcli.api.reference_manager import ReferenceManager +from trcli.api.case_matcher import CaseMatcherFactory +from trcli.api.suite_handler import SuiteHandler +from trcli.api.section_handler import SectionHandler +from trcli.api.result_handler import ResultHandler +from trcli.api.run_handler import RunHandler +from trcli.api.bdd_handler import BddHandler +from trcli.api.case_handler import CaseHandler from trcli.cli import Environment from trcli.constants import ( ProjectErrors, - FAULT_MAPPING, OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID, + FAULT_MAPPING, + OLD_SYSTEM_NAME_AUTOMATION_ID, + UPDATED_SYSTEM_NAME_AUTOMATION_ID, ) from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData from trcli.data_providers.api_data_provider import ApiDataProvider -from trcli.settings import MAX_WORKERS_ADD_RESULTS, MAX_WORKERS_ADD_CASE +from trcli.settings import ( + MAX_WORKERS_ADD_RESULTS, + MAX_WORKERS_ADD_CASE, + ENABLE_PARALLEL_PAGINATION, + MAX_WORKERS_PARALLEL_PAGINATION, +) class ApiRequestHandler: @@ -33,10 +51,39 @@ def __init__( environment.case_fields, environment.run_description, environment.result_fields, - environment.section_id + environment.section_id, ) self.suites_data_from_provider = self.data_provider.suites_input self.response_verifier = ApiResponseVerify(verify) + # Initialize session-scoped cache for API responses + self._cache = RequestCache(max_size=512) + # Initialize specialized managers + self.label_manager = LabelManager(api_client, environment) + self.reference_manager = ReferenceManager(api_client, environment) + self.suite_handler = SuiteHandler( + api_client, environment, self.data_provider, get_all_suites_callback=self.__get_all_suites + ) + self.section_handler = SectionHandler( + api_client, environment, self.data_provider, get_all_sections_callback=self.__get_all_sections + ) + self.result_handler = ResultHandler( + api_client, + environment, + self.data_provider, + get_all_tests_in_run_callback=self.__get_all_tests_in_run, + handle_futures_callback=self.handle_futures, + ) + self.run_handler = RunHandler( + api_client, environment, self.data_provider, get_all_tests_in_run_callback=self.__get_all_tests_in_run + ) + self.bdd_handler = BddHandler(api_client, environment) + self.case_handler = CaseHandler( + api_client, + environment, + self.data_provider, + handle_futures_callback=self.handle_futures, + retrieve_results_callback=ApiRequestHandler.retrieve_results_after_cancelling, + ) def check_automation_id_field(self, project_id: int) -> Union[str, None]: """ @@ -48,22 +95,24 @@ def check_automation_id_field(self, project_id: int) -> Union[str, None]: if not response.error_message: fields: List = response.response_text 
automation_id_field = next( - filter( + filter( lambda x: x["system_name"] in [OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID], - fields + fields, ), - None + None, ) if automation_id_field: if automation_id_field["is_active"] is False: return FAULT_MAPPING["automation_id_unavailable"] if not automation_id_field["configs"]: self._active_automation_id_field = automation_id_field["system_name"] + self.case_handler._active_automation_id_field = automation_id_field["system_name"] return None for config in automation_id_field["configs"]: context = config["context"] if context["is_global"] or project_id in context["project_ids"]: self._active_automation_id_field = automation_id_field["system_name"] + self.case_handler._active_automation_id_field = automation_id_field["system_name"] return None return FAULT_MAPPING["automation_id_unavailable"] else: @@ -79,11 +128,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project """ projects_data, error = self.__get_all_projects() if not error: - available_projects = [ - project - for project in projects_data - if project["name"] == project_name - ] + available_projects = [project for project in projects_data if project["name"] == project_name] if len(available_projects) == 1: return ProjectData( @@ -94,9 +139,7 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project elif len(available_projects) > 1: if project_id in [project["id"] for project in available_projects]: project_index = [ - index - for index, project in enumerate(available_projects) - if project["id"] == project_id + index for index, project in enumerate(available_projects) if project["id"] == project_id ][0] return ProjectData( project_id=int(available_projects[project_index]["id"]), @@ -123,643 +166,107 @@ def get_project_data(self, project_name: str, project_id: int = None) -> Project ) def check_suite_id(self, project_id: int) -> Tuple[bool, str]: - """ - Check if suite from DataProvider exist using get_suites endpoint - :project_id: project id - :returns: True if exists in suites. False if not. - """ suite_id = self.suites_data_from_provider.suite_id - suites_data, error = self.__get_all_suites(project_id) - if not error: - available_suites = [ - suite - for suite in suites_data - if suite["id"] == suite_id - ] - return ( - (True, "") - if len(available_suites) > 0 - else (False, FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) - ) - else: - return None, suites_data.error_message + return self.suite_handler.check_suite_id(project_id, suite_id) def resolve_suite_id_using_name(self, project_id: int) -> Tuple[int, str]: - """Get suite ID matching suite name on data provider or returns -1 if unable to match any suite. - :arg project_id: project id - :returns: tuple with id of the suite and error message""" - suite_id = -1 suite_name = self.suites_data_from_provider.name - suites_data, error = self.__get_all_suites(project_id) - if not error: - for suite in suites_data: - if suite["name"] == suite_name: - suite_id = suite["id"] - self.data_provider.update_data([{"suite_id": suite["id"], "name": suite["name"]}]) - break - return ( - (suite_id, "") - if suite_id != -1 - else (-1, FAULT_MAPPING["missing_suite_by_name"].format(suite_name=suite_name)) - ) - else: - return -1, error + return self.suite_handler.resolve_suite_id_using_name(project_id, suite_name) def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: - """Get suite IDs for requested project_id. 
- : project_id: project id - : returns: tuple with list of suite ids and error string""" - available_suites = [] - returned_resources = [] - suites_data, error = self.__get_all_suites(project_id) - if not error: - for suite in suites_data: - available_suites.append(suite["id"]) - returned_resources.append( - { - "suite_id": suite["id"], - "name": suite["name"], - } - ) - if returned_resources: - self.data_provider.update_data(suite_data=returned_resources) - else: - print("Update skipped") - return ( - (available_suites, "") - if len(available_suites) > 0 - else ([], FAULT_MAPPING["no_suites_found"].format(project_id=project_id)) - ) - else: - return [], error + return self.suite_handler.get_suite_ids(project_id) def add_suites(self, project_id: int) -> Tuple[List[Dict], str]: - """ - Adds suites that doesn't have ID's in DataProvider. - Runs update_data in data_provider for successfully created resources. - :project_id: project_id - :returns: Tuple with list of dict created resources and error string. - """ - add_suite_data = self.data_provider.add_suites_data() - responses = [] - error_message = "" - for body in add_suite_data: - response = self.client.send_post(f"add_suite/{project_id}", body) - if not response.error_message: - responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): - responses.append(response) - error_message = FAULT_MAPPING["data_verification_error"] - break - else: - error_message = response.error_message - break - - returned_resources = [ - { - "suite_id": response.response_text["id"], - "name": response.response_text["name"], - } - for response in responses - ] - self.data_provider.update_data(suite_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" - return returned_resources, error_message + return self.suite_handler.add_suites(project_id, verify_callback=self.response_verifier.verify_returned_data) def check_missing_section_ids(self, project_id: int) -> Tuple[bool, str]: - """ - Check what section id's are missing in DataProvider. - :project_id: project_id - :returns: Tuple with list missing section ID and error string. 
- """ suite_id = self.suites_data_from_provider.suite_id - returned_sections, error_message = self.__get_all_sections(project_id, suite_id) - if not error_message: - missing_test_sections = False - sections_by_id = {section["id"]: section for section in returned_sections} - sections_by_name = {section["name"]: section for section in returned_sections} - section_data = [] - for section in self.suites_data_from_provider.testsections: - if self.environment.section_id: - if section.section_id in sections_by_id.keys(): - section_json = sections_by_id[section.section_id] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) - else: - missing_test_sections = True - if section.name in sections_by_name.keys(): - section_json = sections_by_name[section.name] - section_data.append({ - "section_id": section_json["id"], - "suite_id": section_json["suite_id"], - "name": section_json["name"], - }) - else: - missing_test_sections = True - self.data_provider.update_data(section_data=section_data) - return missing_test_sections, error_message - else: - return False, error_message + return self.section_handler.check_missing_section_ids(project_id, suite_id, self.suites_data_from_provider) def add_sections(self, project_id: int) -> Tuple[List[Dict], str]: - """ - Add sections that doesn't have ID in DataProvider. - Runs update_data in data_provider for successfully created resources. - :project_id: project_id - :returns: Tuple with list of dict created resources and error string. - """ - add_sections_data = self.data_provider.add_sections_data() - responses = [] - error_message = "" - for body in add_sections_data: - response = self.client.send_post(f"add_section/{project_id}", body) - if not response.error_message: - responses.append(response) - if not self.response_verifier.verify_returned_data( - body, response.response_text - ): - responses.append(response) - error_message = FAULT_MAPPING["data_verification_error"] - break - else: - error_message = response.error_message - break - returned_resources = [ - { - "section_id": response.response_text["id"], - "suite_id": response.response_text["suite_id"], - "name": response.response_text["name"], - } - for response in responses - ] - self.data_provider.update_data(section_data=returned_resources) if len( - returned_resources - ) > 0 else "Update skipped" - return returned_resources, error_message + return self.section_handler.add_sections( + project_id, verify_callback=self.response_verifier.verify_returned_data + ) def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: """ - Check what test cases id's are missing in DataProvider. + Check what test cases id's are missing in DataProvider using the configured matcher strategy. :project_id: project_id :returns: Tuple with list test case ID missing and error string. 
""" - missing_cases_number = 0 suite_id = self.suites_data_from_provider.suite_id - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message - if self.environment.case_matcher == MatchersParser.AUTO: - test_cases_by_aut_id = {} - for case in returned_cases: - aut_case_id = case.get(OLD_SYSTEM_NAME_AUTOMATION_ID) or case.get(UPDATED_SYSTEM_NAME_AUTOMATION_ID) - if aut_case_id: - aut_case_id = html.unescape(aut_case_id) - test_cases_by_aut_id[aut_case_id] = case - test_case_data = [] - for section in self.suites_data_from_provider.testsections: - for test_case in section.testcases: - aut_id = test_case.custom_automation_id - if aut_id in test_cases_by_aut_id.keys(): - case = test_cases_by_aut_id[aut_id] - test_case_data.append({ - "case_id": case["id"], - "section_id": case["section_id"], - "title": case["title"], - OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id - }) - else: - missing_cases_number += 1 - self.data_provider.update_data(case_data=test_case_data) - if missing_cases_number: - self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") - else: - nonexistent_ids = [] - all_case_ids = [case["id"] for case in returned_cases] - for section in self.suites_data_from_provider.testsections: - for test_case in section.testcases: - if not test_case.case_id: - missing_cases_number += 1 - elif int(test_case.case_id) not in all_case_ids: - nonexistent_ids.append(test_case.case_id) - if missing_cases_number: - self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") - if nonexistent_ids: - self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") - return False, "Case IDs not in TestRail project or suite were detected in the report file." - - return missing_cases_number > 0, "" + + # Create appropriate matcher based on configuration (Strategy pattern) + matcher = CaseMatcherFactory.create_matcher(self.environment.case_matcher, self.environment, self.data_provider) + + # Delegate to the matcher + return matcher.check_missing_cases( + project_id, + suite_id, + self.suites_data_from_provider, + get_all_cases_callback=self.__get_all_cases, + validate_case_ids_callback=self.__validate_case_ids_exist, + ) def add_cases(self) -> Tuple[List[dict], str]: - """ - Add cases that doesn't have ID in DataProvider. - Runs update_data in data_provider for successfully created resources. - :returns: Tuple with list of dict created resources and error string. - """ - add_case_data = self.data_provider.add_cases() - responses = [] - error_message = "" - with self.environment.get_progress_bar( - results_amount=len(add_case_data), prefix="Adding test cases" - ) as progress_bar: - with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_CASE) as executor: - futures = { - executor.submit( - self._add_case_and_update_data, - body, - ): body - for body in add_case_data - } - responses, error_message = self.handle_futures( - futures=futures, action_string="add_case", progress_bar=progress_bar - ) - if error_message: - # When error_message is present we cannot be sure that responses contains all added items. 
- # Iterate through futures to get all responses from done tasks (not cancelled) - responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) - returned_resources = [ - { - "case_id": response.response_text["id"], - "section_id": response.response_text["section_id"], - "title": response.response_text["title"] - } - for response in responses - ] - return returned_resources, error_message + return self.case_handler.add_cases() def add_run( - self, - project_id: int, - run_name: str, - milestone_id: int = None, - start_date: str = None, - end_date: str = None, - plan_id: int = None, - config_ids: List[int] = None, - assigned_to_id: int = None, - include_all: bool = False, - refs: str = None, - case_ids: List[int] = None, + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, ) -> Tuple[int, str]: - """ - Creates a new test run. - :project_id: project_id - :run_name: run name - :returns: Tuple with run id and error string. - """ - add_run_data = self.data_provider.add_run( + return self.run_handler.add_run( + project_id, run_name, - case_ids=case_ids, - start_date=start_date, - end_date=end_date, - milestone_id=milestone_id, - assigned_to_id=assigned_to_id, - include_all=include_all, - refs=refs, + milestone_id, + start_date, + end_date, + plan_id, + config_ids, + assigned_to_id, + include_all, + refs, + case_ids, ) - if not plan_id: - response = self.client.send_post(f"add_run/{project_id}", add_run_data) - run_id = response.response_text.get("id") - else: - if config_ids: - add_run_data["config_ids"] = config_ids - entry_data = { - "name": add_run_data["name"], - "suite_id": add_run_data["suite_id"], - "config_ids": config_ids, - "runs": [add_run_data] - } - else: - entry_data = add_run_data - response = self.client.send_post(f"add_plan_entry/{plan_id}", entry_data) - run_id = response.response_text["runs"][0]["id"] - return run_id, response.error_message - def update_run(self, run_id: int, run_name: str, start_date: str = None, - end_date: str = None, milestone_id: int = None, refs: str = None, refs_action: str = 'add') -> Tuple[dict, str]: - """ - Updates an existing run - :run_id: run id - :run_name: run name - :refs: references to manage - :refs_action: action to perform ('add', 'update', 'delete') - :returns: Tuple with run and error string. 
- """ - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.error_message: - return None, run_response.error_message - - existing_description = run_response.response_text.get("description", "") - existing_refs = run_response.response_text.get("refs", "") - - add_run_data = self.data_provider.add_run(run_name, start_date=start_date, - end_date=end_date, milestone_id=milestone_id) - add_run_data["description"] = existing_description # Retain the current description - - # Handle references based on action - if refs is not None: - updated_refs = self._manage_references(existing_refs, refs, refs_action) - add_run_data["refs"] = updated_refs - else: - add_run_data["refs"] = existing_refs # Keep existing refs if none provided - - run_tests, error_message = self.__get_all_tests_in_run(run_id) - run_case_ids = [test["case_id"] for test in run_tests] - report_case_ids = add_run_data["case_ids"] - joint_case_ids = list(set(report_case_ids + run_case_ids)) - add_run_data["case_ids"] = joint_case_ids - - plan_id = run_response.response_text["plan_id"] - config_ids = run_response.response_text["config_ids"] - if not plan_id: - update_response = self.client.send_post(f"update_run/{run_id}", add_run_data) - elif plan_id and config_ids: - update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", add_run_data) - else: - response = self.client.send_get(f"get_plan/{plan_id}") - entry_id = next( - ( - run["entry_id"] - for entry in response.response_text["entries"] - for run in entry["runs"] - if run["id"] == run_id - ), - None, - ) - update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", add_run_data) - run_response = self.client.send_get(f"get_run/{run_id}") - return run_response.response_text, update_response.error_message + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: + return self.run_handler.update_run(run_id, run_name, start_date, end_date, milestone_id, refs, refs_action) def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str: - """ - Manage references based on the specified action. 
- :existing_refs: current references in the run - :new_refs: new references to process - :action: 'add', 'update', or 'delete' - :returns: updated references string - """ - if not existing_refs: - existing_refs = "" - - if action == 'update': - # Replace all references with new ones - return new_refs - elif action == 'delete': - if not new_refs: - # Delete all references - return "" - else: - # Delete specific references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - refs_to_delete = [ref.strip() for ref in new_refs.split(',') if ref.strip()] - updated_list = [ref for ref in existing_list if ref not in refs_to_delete] - return ','.join(updated_list) - else: # action == 'add' (default) - # Add new references to existing ones - if not existing_refs: - return new_refs - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - new_list = [ref.strip() for ref in new_refs.split(',') if ref.strip()] - # Avoid duplicates - combined_list = existing_list + [ref for ref in new_list if ref not in existing_list] - return ','.join(combined_list) + return self.run_handler._manage_references(existing_refs, new_refs, action) def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: - """ - Append references to a test run, avoiding duplicates. - :param run_id: ID of the test run - :param references: List of references to append - :returns: Tuple with (run_data, added_refs, skipped_refs, error_message) - """ - # Get current run data - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.error_message: - return None, [], [], run_response.error_message - - existing_refs = run_response.response_text.get("refs", "") or "" - - # Parse existing and new references - existing_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] - # Deduplicate input references - new_list = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - new_list.append(ref_clean) - seen.add(ref_clean) - - # Determine which references are new vs duplicates - added_refs = [ref for ref in new_list if ref not in existing_list] - skipped_refs = [ref for ref in new_list if ref in existing_list] - - # If no new references to add, return current state - if not added_refs: - return run_response.response_text, added_refs, skipped_refs, None - - # Combine references - combined_list = existing_list + added_refs - combined_refs = ','.join(combined_list) - - if len(combined_refs) > 250: - return None, [], [], f"Combined references length ({len(combined_refs)} characters) exceeds 250 character limit" - - update_data = {"refs": combined_refs} - - # Determine the correct API endpoint based on plan membership - plan_id = run_response.response_text.get("plan_id") - config_ids = run_response.response_text.get("config_ids") - - if not plan_id: - # Standalone run - update_response = self.client.send_post(f"update_run/{run_id}", update_data) - elif plan_id and config_ids: - # Run in plan with configurations - update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", update_data) - else: - # Run in plan without configurations - need to use plan entry endpoint - plan_response = self.client.send_get(f"get_plan/{plan_id}") - if plan_response.error_message: - return None, [], [], f"Failed to get plan details: {plan_response.error_message}" - - # Find the entry_id for this run - entry_id = None - for entry in 
plan_response.response_text.get("entries", []): - for run in entry.get("runs", []): - if run["id"] == run_id: - entry_id = entry["id"] - break - if entry_id: - break - - if not entry_id: - return None, [], [], f"Could not find plan entry for run {run_id}" - - update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) - - if update_response.error_message: - return None, [], [], update_response.error_message - - updated_run_response = self.client.send_get(f"get_run/{run_id}") - return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message - - def update_existing_case_references(self, case_id: int, junit_refs: str, strategy: str = "append") -> Tuple[bool, str, List[str], List[str]]: - """ - Update existing case references with values from JUnit properties. - :param case_id: ID of the test case - :param junit_refs: References from JUnit testrail_case_field property - :param strategy: 'append' or 'replace' - :returns: Tuple with (success, error_message, added_refs, skipped_refs) - """ - if not junit_refs or not junit_refs.strip(): - return True, None, [], [] # No references to process - - # Parse and validate JUnit references, deduplicating input - junit_ref_list = [] - seen = set() - for ref in junit_refs.split(','): - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - junit_ref_list.append(ref_clean) - seen.add(ref_clean) - - if not junit_ref_list: - return False, "No valid references found in JUnit property", [], [] - - # Get current case data - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.error_message: - return False, case_response.error_message, [], [] - - existing_refs = case_response.response_text.get('refs', '') or '' - - if strategy == "replace": - # Replace strategy: use JUnit refs as-is - new_refs = ','.join(junit_ref_list) - added_refs = junit_ref_list - skipped_refs = [] - else: - # Append strategy: combine with existing refs, avoiding duplicates - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] if existing_refs else [] - - # Determine which references are new vs duplicates - added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] - skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] - - # If no new references to add, return current state - if not added_refs: - return True, None, added_refs, skipped_refs - - # Combine references - combined_list = existing_ref_list + added_refs - new_refs = ','.join(combined_list) - - # Validate 2000 character limit for test case references - if len(new_refs) > 2000: - return False, f"Combined references length ({len(new_refs)} characters) exceeds 2000 character limit", [], [] - - # Update the case - update_data = {"refs": new_refs} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.error_message: - return False, update_response.error_message, [], [] - - return True, None, added_refs, skipped_refs - - def upload_attachments(self, report_results: [Dict], results: List[Dict], run_id: int): - """ Getting test result id and upload attachments for it. 
""" - tests_in_run, error = self.__get_all_tests_in_run(run_id) - if not error: - for report_result in report_results: - case_id = report_result["case_id"] - test_id = next((test["id"] for test in tests_in_run if test["case_id"] == case_id), None) - result_id = next((result["id"] for result in results if result["test_id"] == test_id), None) - for file_path in report_result.get("attachments"): - try: - with open(file_path, "rb") as file: - self.client.send_post(f"add_attachment_to_result/{result_id}", files={"attachment": file}) - except Exception as ex: - self.environment.elog(f"Error uploading attachment for case {case_id}: {ex}") - else: - self.environment.elog(f"Unable to upload attachments due to API request error: {error}") + return self.run_handler.append_run_references(run_id, references) - def add_results(self, run_id: int) -> Tuple[List, str, int]: - """ - Adds one or more new test results. - :run_id: run id - :returns: Tuple with dict created resources, error string, and results count. - """ - responses = [] - error_message = "" - # Get pre-validated user IDs if available - user_ids = getattr(self.environment, '_validated_user_ids', []) - - add_results_data_chunks = self.data_provider.add_results_for_cases( - self.environment.batch_size, user_ids - ) - # Get assigned count from data provider - assigned_count = getattr(self.data_provider, '_assigned_count', 0) - - results_amount = sum( - [len(results["results"]) for results in add_results_data_chunks] - ) + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: + return self.case_handler.update_existing_case_references(case_id, junit_refs, strategy) - with self.environment.get_progress_bar( - results_amount=results_amount, prefix="Adding results" - ) as progress_bar: - with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: - futures = { - executor.submit( - self.client.send_post, f"add_results_for_cases/{run_id}", body - ): body - for body in add_results_data_chunks - } - responses, error_message = self.handle_futures( - futures=futures, - action_string="add_results", - progress_bar=progress_bar, - ) - if error_message: - # When error_message is present we cannot be sure that responses contains all added items. 
- # Iterate through futures to get all responses from done tasks (not cancelled) - responses = ApiRequestHandler.retrieve_results_after_cancelling(futures) - responses = [response.response_text for response in responses] - results = [ - result - for results_list in responses - for result in results_list - ] - report_results_w_attachments = [] - for results_data_chunk in add_results_data_chunks: - for test_result in results_data_chunk["results"]: - if test_result["attachments"]: - report_results_w_attachments.append(test_result) - if report_results_w_attachments: - attachments_count = 0 - for result in report_results_w_attachments: - attachments_count += len(result["attachments"]) - self.environment.log(f"Uploading {attachments_count} attachments " - f"for {len(report_results_w_attachments)} test results.") - self.upload_attachments(report_results_w_attachments, results, run_id) - else: - self.environment.log(f"No attachments found to upload.") - - # Log assignment results if assignment was performed - if user_ids: - total_failed = getattr(self.data_provider, '_total_failed_count', assigned_count) - if assigned_count > 0: - self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") - else: - self.environment.log(f"Assigning failed results: 0/0, Done.") - - return responses, error_message, progress_bar.n + def upload_attachments(self, report_results: List[Dict], results: List[Dict], run_id: int): + return self.result_handler.upload_attachments(report_results, results, run_id) + def add_results(self, run_id: int) -> Tuple[List, str, int]: + return self.result_handler.add_results(run_id) def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, str]: responses = [] @@ -776,9 +283,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st if action_string == "add_case": arguments = arguments.to_dict() arguments.pop("case_id") - if not self.response_verifier.verify_returned_data( - arguments, response.response_text - ): + if not self.response_verifier.verify_returned_data(arguments, response.response_text): responses.append(response) error_message = FAULT_MAPPING["data_verification_error"] self.__cancel_running_futures(futures, action_string) @@ -786,9 +291,7 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st progress_bar.update(1) else: error_message = response.error_message - self.environment.log( - f"\nError during {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nError during {action_string}. Trying to cancel scheduled tasks.") self.__cancel_running_futures(futures, action_string) break else: @@ -799,177 +302,168 @@ def handle_futures(self, futures, action_string, progress_bar) -> Tuple[list, st return responses, error_message def close_run(self, run_id: int) -> Tuple[dict, str]: - """ - Closes an existing test run and archives its tests & results. - :run_id: run id - :returns: Tuple with dict created resources and error string. - """ - body = {"run_id": run_id} - response = self.client.send_post(f"close_run/{run_id}", body) - return response.response_text, response.error_message + return self.run_handler.close_run(run_id) def delete_suite(self, suite_id: int) -> Tuple[dict, str]: - """ - Delete suite given suite id - :suite_id: suite id - :returns: Tuple with dict created resources and error string. 
- """ - response = self.client.send_post(f"delete_suite/{suite_id}", payload={}) - return response.response_text, response.error_message + return self.suite_handler.delete_suite(suite_id) def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: - """ - Delete section given add_sections response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - responses = [] - error_message = "" - for section in added_sections: - response = self.client.send_post( - f"delete_section/{section['section_id']}", payload={} - ) - if not response.error_message: - responses.append(response.response_text) - else: - error_message = response.error_message - break - return responses, error_message + return self.section_handler.delete_sections(added_sections) def delete_cases(self, suite_id: int, added_cases: List[Dict]) -> Tuple[Dict, str]: - """ - Delete cases given add_cases response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - body = {"case_ids": [case["case_id"] for case in added_cases]} - response = self.client.send_post(f"delete_cases/{suite_id}", payload=body) - return response.response_text, response.error_message + return self.case_handler.delete_cases(suite_id, added_cases) def delete_run(self, run_id) -> Tuple[dict, str]: - """ - Delete run given add_run response - :suite_id: section id - :returns: Tuple with dict created resources and error string. - """ - response = self.client.send_post(f"delete_run/{run_id}", payload={}) - return response.response_text, response.error_message + return self.run_handler.delete_run(run_id) @staticmethod def retrieve_results_after_cancelling(futures) -> list: - responses = [] - for future in as_completed(futures): - if not future.cancelled(): - response = future.result() - if not response.error_message: - responses.append(response) - return responses + """ + Retrieve results from futures after cancellation has been triggered. + Delegated to ResultHandler for backward compatibility. + """ + return ResultHandler.retrieve_results_after_cancelling(futures) def get_user_by_email(self, email: str) -> Tuple[Union[int, None], str]: """ Validates a user email and returns the user ID if valid. 
- + :param email: User email to validate :returns: Tuple with user ID (or None if not found) and error message """ if not email or not email.strip(): return None, "Email cannot be empty" - + email = email.strip() # Use proper URL encoding for the query parameter import urllib.parse + encoded_email = urllib.parse.quote_plus(email) response = self.client.send_get(f"get_user_by_email&email={encoded_email}") - + if response.error_message: # Map TestRail's email validation error to our expected format if "Field :email is not a valid email address" in response.error_message: return None, f"User not found: {email}" return None, response.error_message - + if response.status_code == 200: try: user_data = response.response_text - if isinstance(user_data, dict) and 'id' in user_data: - return user_data['id'], "" + if isinstance(user_data, dict) and "id" in user_data: + return user_data["id"], "" else: return None, f"Invalid response format for user: {email}" except (KeyError, TypeError): return None, f"Invalid response format for user: {email}" elif response.status_code == 400: # Check if the response contains the email validation error - if (hasattr(response, 'response_text') and response.response_text and - isinstance(response.response_text, dict) and - "Field :email is not a valid email address" in str(response.response_text.get('error', ''))): + if ( + hasattr(response, "response_text") + and response.response_text + and isinstance(response.response_text, dict) + and "Field :email is not a valid email address" in str(response.response_text.get("error", "")) + ): return None, f"User not found: {email}" return None, f"User not found: {email}" else: # For other status codes, check if it's the email validation error - if (hasattr(response, 'response_text') and response.response_text and - "Field :email is not a valid email address" in str(response.response_text)): + if ( + hasattr(response, "response_text") + and response.response_text + and "Field :email is not a valid email address" in str(response.response_text) + ): return None, f"User not found: {email}" return None, f"API error (status {response.status_code}) when validating user: {email}" def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: - case_body = case.to_dict() - active_field = getattr(self, "_active_automation_id_field", None) - if active_field == UPDATED_SYSTEM_NAME_AUTOMATION_ID and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: - case_body[UPDATED_SYSTEM_NAME_AUTOMATION_ID] = case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) - if self.environment.case_matcher != MatchersParser.AUTO and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: - case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) - response = self.client.send_post(f"add_case/{case_body.pop('section_id')}", case_body) - if response.status_code == 200: - case.case_id = response.response_text["id"] - case.result.case_id = response.response_text["id"] - case.section_id = response.response_text["section_id"] - return response + return self.case_handler._add_case_and_update_data(case) def __cancel_running_futures(self, futures, action_string): - self.environment.log( - f"\nAborting: {action_string}. Trying to cancel scheduled tasks." - ) + self.environment.log(f"\nAborting: {action_string}. 
Trying to cancel scheduled tasks.") for future in futures: future.cancel() def __get_all_cases(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ - Get all cases from all pages + Get all cases from all pages (with caching) """ - if suite_id is None: - return self.__get_all_entities('cases', f"get_cases/{project_id}") - else: - return self.__get_all_entities('cases', f"get_cases/{project_id}&suite_id={suite_id}") + cache_key = f"get_cases/{project_id}" + params = (project_id, suite_id) + + def fetch(): + if suite_id is None: + return self.__get_all_entities("cases", f"get_cases/{project_id}", entities=[]) + else: + return self.__get_all_entities("cases", f"get_cases/{project_id}&suite_id={suite_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_sections(self, project_id=None, suite_id=None) -> Tuple[List[dict], str]: """ - Get all sections from all pages + Get all sections from all pages (with caching) """ - return self.__get_all_entities('sections', f"get_sections/{project_id}&suite_id={suite_id}") + cache_key = f"get_sections/{project_id}" + params = (project_id, suite_id) + + def fetch(): + return self.__get_all_entities("sections", f"get_sections/{project_id}&suite_id={suite_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_tests_in_run(self, run_id=None) -> Tuple[List[dict], str]: """ - Get all tests from all pages + Get all tests from all pages (with caching) """ - return self.__get_all_entities('tests', f"get_tests/{run_id}") + cache_key = f"get_tests/{run_id}" + params = (run_id,) + + def fetch(): + return self.__get_all_entities("tests", f"get_tests/{run_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_projects(self) -> Tuple[List[dict], str]: """ - Get all projects from all pages + Get all projects from all pages (with caching) """ - return self.__get_all_entities('projects', f"get_projects") + cache_key = "get_projects" + params = None + + def fetch(): + return self.__get_all_entities("projects", "get_projects", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_suites(self, project_id) -> Tuple[List[dict], str]: """ - Get all suites from all pages + Get all suites from all pages (with caching) """ - return self.__get_all_entities('suites', f"get_suites/{project_id}") + cache_key = f"get_suites/{project_id}" + params = (project_id,) + + def fetch(): + return self.__get_all_entities("suites", f"get_suites/{project_id}", entities=[]) + + return self._cache.get_or_fetch(cache_key, fetch, params) def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[Dict], str]: """ Get all entities from all pages if number of entities is too big to return in single response. Function using next page field in API response. Entity examples: cases, sections + + If ENABLE_PARALLEL_PAGINATION is True or --parallel-pagination flag is set, + pages will be fetched in parallel for better performance.
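+
+        Illustrative example (editor's sketch, not part of the original patch): the same
+        call is used in both modes, only the routing changes.
+
+            # sequential (default): pages are followed one at a time via the "next" link
+            cases, error = self.__get_all_entities("cases", f"get_cases/{project_id}")
+            # parallel: when environment.parallel_pagination or ENABLE_PARALLEL_PAGINATION
+            # is truthy, the first call is delegated to __get_all_entities_parallel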
""" + # Check if parallel pagination is enabled (CLI flag takes precedence) + parallel_enabled = getattr(self.environment, "parallel_pagination", False) or ENABLE_PARALLEL_PAGINATION + + # Use parallel pagination if enabled and this is the first call (entities is empty) + if parallel_enabled and not entities: + return self.__get_all_entities_parallel(entity, link) + + # Otherwise use sequential pagination (original implementation) if link.startswith(self.suffix): link = link.replace(self.suffix, "") response = self.client.send_get(link) @@ -979,9 +473,7 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ return response.response_text, response.error_message # Check if response is a string (JSON parse failed) if isinstance(response.response_text, str): - error_msg = FAULT_MAPPING["invalid_api_response"].format( - error_details=response.response_text[:200] - ) + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) return [], error_msg # Endpoints with pagination entities = entities + response.response_text[entity] @@ -993,739 +485,349 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ else: return [], response.error_message - # Label management methods - def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: - """ - Add a new label to the project - :param project_id: ID of the project - :param title: Title of the label (max 20 characters) - :returns: Tuple with created label data and error string + def __get_all_entities_parallel(self, entity: str, link: str) -> Tuple[List[Dict], str]: """ - # Use multipart/form-data like the working CURL command - files = {'title': (None, title)} - response = self.client.send_post(f"add_label/{project_id}", payload=None, files=files) - return response.response_text, response.error_message + Parallel version of __get_all_entities for faster pagination. + Fetches multiple pages concurrently using ThreadPoolExecutor. - def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: - """ - Update an existing label - :param label_id: ID of the label to update - :param project_id: ID of the project - :param title: New title for the label (max 20 characters) - :returns: Tuple with updated label data and error string + :param entity: Entity type (cases, sections, etc.) 
+ :param link: Initial API link + :returns: Tuple of (all entities list, error message) """ - # Use multipart/form-data like add_label - files = { - 'project_id': (None, str(project_id)), - 'title': (None, title) # Field name is 'title' (no colon) for form data - } - response = self.client.send_post(f"update_label/{label_id}", payload=None, files=files) - return response.response_text, response.error_message + fetch_start_time = time.time() - def get_label(self, label_id: int) -> Tuple[dict, str]: - """ - Get a specific label by ID - :param label_id: ID of the label to retrieve - :returns: Tuple with label data and error string - """ - response = self.client.send_get(f"get_label/{label_id}") - return response.response_text, response.error_message + if link.startswith(self.suffix): + link = link.replace(self.suffix, "") - def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: - """ - Get all labels for a project with pagination - :param project_id: ID of the project - :param offset: Offset for pagination - :param limit: Limit for pagination - :returns: Tuple with labels data (including pagination info) and error string - """ - params = [] - if offset > 0: - params.append(f"offset={offset}") - if limit != 250: - params.append(f"limit={limit}") - - url = f"get_labels/{project_id}" - if params: - url += "&" + "&".join(params) - - response = self.client.send_get(url) - return response.response_text, response.error_message + # Step 1: Fetch first page to get metadata + self.environment.log(f"Fetching first page to determine total pages...") + response = self.client.send_get(link) - def delete_label(self, label_id: int) -> Tuple[bool, str]: - """ - Delete a single label - :param label_id: ID of the label to delete - :returns: Tuple with success status and error string - """ - response = self.client.send_post(f"delete_label/{label_id}") - success = response.status_code == 200 - return success, response.error_message + if response.error_message: + return [], response.error_message - def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: - """ - Delete multiple labels - :param label_ids: List of label IDs to delete - :returns: Tuple with success status and error string - """ - # Send as form data with JSON array format - import json - label_ids_json = json.dumps(label_ids) - files = {"label_ids": (None, label_ids_json)} - response = self.client.send_post("delete_labels", payload=None, files=files) - success = response.status_code == 200 - return success, response.error_message - - def add_labels_to_cases(self, case_ids: List[int], title: str, project_id: int, suite_id: int = None) -> Tuple[dict, str]: - """ - Add a label to multiple test cases - - :param case_ids: List of test case IDs - :param title: Label title (max 20 characters) - :param project_id: Project ID for validation - :param suite_id: Suite ID (optional) - :returns: Tuple with response data and error string - """ - # Initialize results structure - results = { - 'successful_cases': [], - 'failed_cases': [], - 'max_labels_reached': [], - 'case_not_found': [] - } - - # Check if project is multi-suite by getting all cases without suite_id - all_cases_no_suite, error_message = self.__get_all_cases(project_id, None) - if error_message: - return results, error_message - - # Check if project has multiple suites - suite_ids = set() - for case in all_cases_no_suite: - if 'suite_id' in case and case['suite_id']: - suite_ids.add(case['suite_id']) - - # If project has multiple suites and no suite_id 
provided, require it - if len(suite_ids) > 1 and suite_id is None: - return results, "This project is multisuite, suite id is required" - - # Get all cases to validate that the provided case IDs exist - all_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return results, error_message - - # Create a set of existing case IDs for quick lookup - existing_case_ids = {case['id'] for case in all_cases} - - # Validate case IDs and separate valid from invalid ones - invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] - valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] - - # Record invalid case IDs - for case_id in invalid_case_ids: - results['case_not_found'].append(case_id) - - # If no valid case IDs, return early - if not valid_case_ids: - return results, "" - - # Check if label exists or create it - existing_labels, error_message = self.get_labels(project_id) - if error_message: - return results, error_message - - # Find existing label with the same title - label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') - break - - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, title) - if error_message: - return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - - # Collect case data and validate constraints - cases_to_update = [] - for case_id in valid_case_ids: - # Get current case to check existing labels - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - results['failed_cases'].append({ - 'case_id': case_id, - 'error': f"Could not retrieve case {case_id}: {case_response.error_message}" - }) - continue - - case_data = case_response.response_text - current_labels = case_data.get('labels', []) - - # Check if label already exists on this case - if any(label.get('id') == label_id for label in current_labels): - results['successful_cases'].append({ - 'case_id': case_id, - 'message': f"Label '{title}' already exists on case {case_id}" - }) - continue - - # Check maximum labels limit (10) - if len(current_labels) >= 10: - results['max_labels_reached'].append(case_id) - continue - - # Prepare case for update - existing_label_ids = [label.get('id') for label in current_labels if label.get('id')] - updated_label_ids = existing_label_ids + [label_id] - cases_to_update.append({ - 'case_id': case_id, - 'labels': updated_label_ids - }) - - # Update cases using appropriate endpoint - if len(cases_to_update) == 1: - # Single case: use update_case/{case_id} - case_info = cases_to_update[0] - case_update_data = {'labels': case_info['labels']} - - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - - if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) - else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) - elif len(cases_to_update) > 1: - # Multiple cases: use update_cases/{suite_id} - # Need to determine suite_id from the cases - case_suite_id = suite_id - if not case_suite_id: - # Get suite_id from the first case if not provided - first_case = all_cases[0] if all_cases else None - 
case_suite_id = first_case.get('suite_id') if first_case else None - - if not case_suite_id: - # Fall back to individual updates if no suite_id available - for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - - if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) - else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) + # Handle non-paginated responses (legacy endpoints) + if isinstance(response.response_text, list): + return response.response_text, response.error_message + + if isinstance(response.response_text, str): + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) + return [], error_msg + + # Collect first page results + all_entities = response.response_text[entity] + first_page_count = len(all_entities) + + # Check if there are more pages + if response.response_text["_links"]["next"] is None: + # Only one page, return immediately + fetch_time = time.time() - fetch_start_time + self.environment.log(f"Single page fetch completed in {fetch_time:.1f}s") + return all_entities, response.error_message + + # Step 2: Calculate total pages needed + # TestRail pagination uses limit parameter (default 250) + # We need to parse the next link to understand pagination structure + next_link = response.response_text["_links"]["next"] + + # Extract offset/limit from the link to calculate total pages + from urllib.parse import urlparse, parse_qs + + # Parse the next link to get offset and limit + parsed = urlparse(next_link) + query_params = parse_qs(parsed.query) + + # Get limit (page size) - default to 250 if not found + limit = int(query_params.get("limit", [250])[0]) + if limit == 0: + limit = 250 + + # Get offset from next link + next_offset = int(query_params.get("offset", [limit])[0]) + + # Step 3: Fetch pages in parallel with dynamic offset generation + # Build base link without offset parameter + # TestRail API uses '&' as separator (e.g., get_cases/123&suite_id=2&offset=250) + base_link = link.split("&offset=")[0].split("?offset=")[0] + + self.environment.log( + f"Starting parallel fetch: first page has {first_page_count} {entity}, " + f"fetching remaining pages with {MAX_WORKERS_PARALLEL_PAGINATION} workers..." + ) + + def fetch_page(offset): + """Fetch a single page by offset""" + # TestRail always uses '&' as separator, not '?' 
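+            # Editor's note (illustrative): the outer batching loop generates these
+            # offsets as next_offset + page_index * limit, e.g. with limit=250 and
+            # next_offset=250 the requested offsets are 250, 500, 750, ...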
+ page_link = f"{base_link}&offset={offset}&limit={limit}" + page_response = self.client.send_get(page_link) + + if page_response.error_message: + return None, page_response.error_message + + if isinstance(page_response.response_text, dict) and entity in page_response.response_text: + page_data = page_response.response_text[entity] + # Return empty list if this page has no data (we've reached the end) + if not page_data: + return [], None + return page_data, None else: - # Batch update using update_cases/{suite_id} - batch_update_data = { - 'case_ids': [case_info['case_id'] for case_info in cases_to_update], - 'labels': cases_to_update[0]['labels'] # Assuming same labels for all cases - } - - batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) - - if batch_response.status_code == 200: - for case_info in cases_to_update: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) - else: - # If batch update fails, fall back to individual updates - for case_info in cases_to_update: - case_update_data = {'labels': case_info['labels']} - update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) - - if update_response.status_code == 200: - results['successful_cases'].append({ - 'case_id': case_info['case_id'], - 'message': f"Successfully added label '{title}' to case {case_info['case_id']}" - }) + return None, "Invalid response format" + + # Fetch pages in parallel with intelligent batching to avoid overwhelming server + error_message = "" + pages_fetched = 1 # We already have the first page + + # Use batching: submit batches of pages, check results, submit next batch + # This prevents overwhelming the server with 10k requests at once + batch_size = 100 # Submit 100 pages at a time + current_page_index = 0 + max_pages = 10000 # Safety cap + consecutive_empty_pages = 0 + max_consecutive_empty = 10 # Stop after 10 consecutive empty pages + + with ThreadPoolExecutor(max_workers=MAX_WORKERS_PARALLEL_PAGINATION) as executor: + should_continue = True + + while should_continue and current_page_index < max_pages: + # Submit next batch of pages + futures = {} + batch_offsets = [] + + for i in range(batch_size): + if current_page_index + i >= max_pages: + break + offset = next_offset + ((current_page_index + i) * limit) + batch_offsets.append(offset) + future = executor.submit(fetch_page, offset) + futures[future] = offset + + if not futures: + break + + # Process this batch + batch_had_data = False + for future in as_completed(futures): + offset = futures[future] + try: + page_data, page_error = future.result() + + if page_error: + error_message = page_error + self.environment.elog(f"Error fetching page at offset {offset}: {page_error}") + should_continue = False + # Cancel remaining futures in this batch + for f in futures: + if not f.done(): + f.cancel() + break + + if page_data is None: + # Error occurred + error_message = "Invalid response format" + should_continue = False + # Cancel remaining + for f in futures: + if not f.done(): + f.cancel() + break + + if len(page_data) == 0: + # Empty page + consecutive_empty_pages += 1 + if consecutive_empty_pages >= max_consecutive_empty: + # We've hit enough empty pages, stop fetching + self.environment.log(f"Reached end of data after {consecutive_empty_pages} empty pages") + should_continue = False + # Cancel remaining futures in this batch + for f in futures: + if 
not f.done(): + f.cancel() + break else: - results['failed_cases'].append({ - 'case_id': case_info['case_id'], - 'error': update_response.error_message - }) - - return results, "" - - def get_cases_by_label(self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None) -> Tuple[List[dict], str]: - """ - Get test cases filtered by label ID or title - - :param project_id: Project ID - :param suite_id: Suite ID (optional) - :param label_ids: List of label IDs to filter by - :param label_title: Label title to filter by - :returns: Tuple with list of matching cases and error string - """ - # Get all cases first - all_cases, error_message = self.__get_all_cases(project_id, suite_id) + # Got data - reset consecutive empty counter + consecutive_empty_pages = 0 + batch_had_data = True + + # Add results to our collection + all_entities.extend(page_data) + pages_fetched += 1 + + # Log progress every 50 pages + if pages_fetched % 50 == 0: + self.environment.log( + f"Fetched {pages_fetched} pages, {len(all_entities)} {entity} so far..." + ) + + except Exception as ex: + error_message = f"Exception during parallel fetch: {str(ex)}" + self.environment.elog(error_message) + should_continue = False + # Cancel remaining + for f in futures: + if not f.done(): + f.cancel() + break + + # Move to next batch + current_page_index += batch_size + + # If this batch had no data at all, we've likely reached the end + if not batch_had_data and consecutive_empty_pages > 0: + should_continue = False + + fetch_time = time.time() - fetch_start_time + if error_message: - return [], error_message - - # If filtering by title, first get the label ID - target_label_ids = label_ids or [] - if label_title and not target_label_ids: - labels_data, error_message = self.get_labels(project_id) - if error_message: - return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - - if not target_label_ids: - return [], "" # No label found is a valid case with 0 results - - # Filter cases that have any of the target labels - matching_cases = [] - for case in all_cases: - case_labels = case.get('labels', []) - case_label_ids = [label.get('id') for label in case_labels] - - # Check if any of the target label IDs are present in this case - if any(label_id in case_label_ids for label_id in target_label_ids): - matching_cases.append(case) - - return matching_cases, "" - - def add_labels_to_tests(self, test_ids: List[int], titles: Union[str, List[str]], project_id: int) -> Tuple[dict, str]: + self.environment.elog(f"Parallel fetch failed after {fetch_time:.1f}s, falling back to sequential...") + # Fall back to sequential fetch + return self.__get_all_entities_sequential(entity, link, []) + + self.environment.log( + f"Parallel fetch completed: {len(all_entities)} {entity} in {fetch_time:.1f}s " + f"(~{len(all_entities) / fetch_time:.0f} items/sec)" + ) + + return all_entities, "" + + def __get_all_entities_sequential(self, entity: str, link: str, entities: List[Dict]) -> Tuple[List[Dict], str]: """ - Add labels to multiple tests - - :param test_ids: List of test IDs - :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) - :param project_id: Project ID for validation - :returns: Tuple with response data and error string + Sequential fallback for __get_all_entities (original implementation). + This is kept separate for fallback purposes. 
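+
+        For example (illustrative): a project with ~1,000 cases is retrieved here as four
+        requests (offsets 0, 250, 500, 750 with the default page size of 250), each one
+        following the previous response's _links.next field.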
""" - # Initialize results structure - results = { - 'successful_tests': [], - 'failed_tests': [], - 'max_labels_reached': [], - 'test_not_found': [] - } - - # Normalize titles to a list - if isinstance(titles, str): - title_list = [titles] - else: - title_list = titles - - # At this point, title_list should already be validated by the CLI - # Just ensure we have clean titles - title_list = [title.strip() for title in title_list if title.strip()] - - if not title_list: - return {}, "No valid labels provided" - - # Validate test IDs by getting run information for each test - valid_test_ids = [] - for test_id in test_ids: - # Get test information to validate it exists - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results['test_not_found'].append(test_id) - continue - - test_data = test_response.response_text - # Validate that the test belongs to the correct project - run_id = test_data.get('run_id') - if run_id: - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.status_code == 200: - run_data = run_response.response_text - if run_data.get('project_id') == project_id: - valid_test_ids.append(test_id) - else: - results['test_not_found'].append(test_id) - else: - results['test_not_found'].append(test_id) - else: - results['test_not_found'].append(test_id) - - # If no valid test IDs, return early - if not valid_test_ids: - return results, "" - - # Check if labels exist or create them - existing_labels, error_message = self.get_labels(project_id) - if error_message: - return results, error_message - - # Process each title to get/create label IDs - label_ids = [] - label_id_to_title = {} # Map label IDs to their titles - for title in title_list: - # Find existing label with the same title - label_id = None - for label in existing_labels.get('labels', []): - if label.get('title') == title: - label_id = label.get('id') - break - - # Create label if it doesn't exist - if label_id is None: - label_data, error_message = self.add_label(project_id, title) - if error_message: - return results, error_message - label_info = label_data.get('label', label_data) - label_id = label_info.get('id') - - if label_id: - label_ids.append(label_id) - label_id_to_title[label_id] = title - - # Collect test data and validate constraints - tests_to_update = [] - for test_id in valid_test_ids: - # Get current test to check existing labels - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results['failed_tests'].append({ - 'test_id': test_id, - 'error': f"Could not retrieve test {test_id}: {test_response.error_message}" - }) - continue - - test_data = test_response.response_text - current_labels = test_data.get('labels', []) - current_label_ids = [label.get('id') for label in current_labels if label.get('id')] - - new_label_ids = [] - already_exists_titles = [] - - for label_id in label_ids: - if label_id not in current_label_ids: - new_label_ids.append(label_id) - else: - if label_id in label_id_to_title: - already_exists_titles.append(label_id_to_title[label_id]) - - if not new_label_ids: - results['successful_tests'].append({ - 'test_id': test_id, - 'message': f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}" - }) - continue - - # Check maximum labels limit (10) - if len(current_label_ids) + len(new_label_ids) > 10: - results['max_labels_reached'].append(test_id) - continue - - # Prepare test for update - updated_label_ids = current_label_ids + 
new_label_ids - - new_label_titles = [] - for label_id in new_label_ids: - if label_id in label_id_to_title: - new_label_titles.append(label_id_to_title[label_id]) - - tests_to_update.append({ - 'test_id': test_id, - 'labels': updated_label_ids, - 'new_labels': new_label_ids, - 'new_label_titles': new_label_titles - }) - - # Update tests using appropriate endpoint - if len(tests_to_update) == 1: - # Single test: use update_test/{test_id} - test_info = tests_to_update[0] - test_update_data = {'labels': test_info['labels']} - - update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - - if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) - new_label_count = len(new_label_titles) - - if new_label_count == 1: - message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" - elif new_label_count > 1: - message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" - else: - message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) + if link.startswith(self.suffix): + link = link.replace(self.suffix, "") + response = self.client.send_get(link) + if not response.error_message: + if isinstance(response.response_text, list): + return response.response_text, response.error_message + if isinstance(response.response_text, str): + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) + return [], error_msg + entities = entities + response.response_text[entity] + if response.response_text["_links"]["next"] is not None: + next_link = response.response_text["_links"]["next"].replace("limit=0", "limit=250") + return self.__get_all_entities_sequential(entity, link=next_link, entities=entities) else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) + return entities, response.error_message else: - # Multiple tests: use individual updates to ensure each test gets its specific labels - for test_info in tests_to_update: - test_update_data = {'labels': test_info['labels']} - update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) - - if update_response.status_code == 200: - new_label_titles = test_info.get('new_label_titles', []) - new_label_count = len(new_label_titles) - - if new_label_count == 1: - message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" - elif new_label_count > 1: - message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" - else: - message = f"No new labels added to test {test_info['test_id']}" - - results['successful_tests'].append({ - 'test_id': test_info['test_id'], - 'message': message - }) - else: - results['failed_tests'].append({ - 'test_id': test_info['test_id'], - 'error': update_response.error_message - }) - - return results, "" - - def get_tests_by_label(self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None) -> Tuple[List[dict], str]: + return [], response.error_message + + def __validate_case_ids_exist(self, suite_id: int, case_ids: List[int]) -> set: """ - Get tests filtered by label ID or title from specific runs - - :param project_id: Project ID - :param label_ids: List of label IDs to 
filter by - :param label_title: Label title to filter by - :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) - :returns: Tuple with list of matching tests and error string + Validate that case IDs exist in TestRail without fetching all cases. + Returns set of valid case IDs. + + :param suite_id: Suite ID + :param case_ids: List of case IDs to validate + :returns: Set of case IDs that exist in TestRail """ - # If filtering by title, first get the label ID - target_label_ids = label_ids or [] - if label_title and not target_label_ids: - labels_data, error_message = self.get_labels(project_id) - if error_message: - return [], error_message - - for label in labels_data.get('labels', []): - if label.get('title') == label_title: - target_label_ids.append(label.get('id')) - - if not target_label_ids: - return [], "" # No label found is a valid case with 0 results - - # Get runs for the project (either all runs or specific run IDs) - if run_ids: - # Use specific run IDs - validate they exist by getting run details - runs = [] - for run_id in run_ids: - run_response = self.client.send_get(f"get_run/{run_id}") - if run_response.status_code == 200: - runs.append(run_response.response_text) - else: - return [], f"Run ID {run_id} not found or inaccessible" + if not case_ids: + return set() + + valid_ids = set() + + # For large numbers of case IDs, use concurrent validation + if len(case_ids) > 50: + from concurrent.futures import ThreadPoolExecutor, as_completed + + def check_case_exists(case_id): + """Check if a single case exists""" + response = self.client.send_get(f"get_case/{case_id}") + if response.status_code == 200 and not response.error_message: + # Verify case belongs to correct project/suite + case_data = response.response_text + if case_data.get("suite_id") == suite_id: + return case_id + return None + + # Use 10 concurrent workers to validate IDs + with ThreadPoolExecutor(max_workers=10) as executor: + futures = {executor.submit(check_case_exists, cid): cid for cid in case_ids} + + for future in as_completed(futures): + result = future.result() + if result is not None: + valid_ids.add(result) else: - # Get all runs for the project - runs_response = self.client.send_get(f"get_runs/{project_id}") - if runs_response.status_code != 200: - return [], runs_response.error_message - - runs_data = runs_response.response_text - runs = runs_data.get('runs', []) if isinstance(runs_data, dict) else runs_data - - # Collect all tests from all runs - matching_tests = [] - for run in runs: - run_id = run.get('id') - if not run_id: - continue - - # Get tests for this run - tests_response = self.client.send_get(f"get_tests/{run_id}") - if tests_response.status_code != 200: - continue # Skip this run if we can't get tests - - tests_data = tests_response.response_text - tests = tests_data.get('tests', []) if isinstance(tests_data, dict) else tests_data - - # Filter tests that have any of the target labels - for test in tests: - test_labels = test.get('labels', []) - test_label_ids = [label.get('id') for label in test_labels] - - # Check if any of the target label IDs are present in this test - if any(label_id in test_label_ids for label_id in target_label_ids): - matching_tests.append(test) - - return matching_tests, "" + # For small sets, validate sequentially + for case_id in case_ids: + response = self.client.send_get(f"get_case/{case_id}") + if response.status_code == 200 and not response.error_message: + case_data = response.response_text + if case_data.get("suite_id") 
== suite_id: + valid_ids.add(case_id) + + return valid_ids + + # Label management methods (delegated to LabelManager for backward compatibility) + def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: + return self.label_manager.add_label(project_id, title) + + def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: + return self.label_manager.update_label(label_id, project_id, title) + + def get_label(self, label_id: int) -> Tuple[dict, str]: + return self.label_manager.get_label(label_id) + + def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: + return self.label_manager.get_labels(project_id, offset, limit) + + def delete_label(self, label_id: int) -> Tuple[bool, str]: + return self.label_manager.delete_label(label_id) + + def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: + return self.label_manager.delete_labels(label_ids) + + def add_labels_to_cases( + self, case_ids: List[int], title: str, project_id: int, suite_id: int = None + ) -> Tuple[dict, str]: + return self.label_manager.add_labels_to_cases( + case_ids, title, project_id, suite_id, get_all_cases_callback=self.__get_all_cases + ) + + def get_cases_by_label( + self, project_id: int, suite_id: int = None, label_ids: List[int] = None, label_title: str = None + ) -> Tuple[List[dict], str]: + return self.label_manager.get_cases_by_label( + project_id, suite_id, label_ids, label_title, get_all_cases_callback=self.__get_all_cases + ) + + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: + return self.label_manager.add_labels_to_tests(test_ids, titles, project_id) + + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: + return self.label_manager.get_tests_by_label(project_id, label_ids, label_title, run_ids) def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: - """ - Get labels for specific tests - - :param test_ids: List of test IDs to get labels for - :returns: Tuple with list of test label information and error string - """ - results = [] - - for test_id in test_ids: - # Get test information - test_response = self.client.send_get(f"get_test/{test_id}") - if test_response.status_code != 200: - results.append({ - 'test_id': test_id, - 'error': f"Test {test_id} not found or inaccessible", - 'labels': [] - }) - continue - - test_data = test_response.response_text - test_labels = test_data.get('labels', []) - - results.append({ - 'test_id': test_id, - 'title': test_data.get('title', 'Unknown'), - 'status_id': test_data.get('status_id'), - 'labels': test_labels, - 'error': None - }) - - return results, "" - - # Test case reference management methods + return self.label_manager.get_test_labels(test_ids) + + # Test case reference management methods (delegated to ReferenceManager for backward compatibility) def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: - """ - Add references to a test case - :param case_id: ID of the test case - :param references: List of references to add - :returns: Tuple with success status and error string - """ - # First get the current test case to retrieve existing references - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - - 
case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - - # Parse existing references - existing_ref_list = [] - if existing_refs: - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - - # Deduplicate input references while preserving order - deduplicated_input = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - deduplicated_input.append(ref_clean) - seen.add(ref_clean) - - # Add new references (avoid duplicates with existing) - all_refs = existing_ref_list.copy() - for ref in deduplicated_input: - if ref not in all_refs: - all_refs.append(ref) - - # Join all references - new_refs_string = ','.join(all_refs) - - # Validate total character limit - if len(new_refs_string) > 2000: - return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - - # Update the test case with new references - update_data = {'refs': new_refs_string} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return self.reference_manager.add_case_references(case_id, references) def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: - """ - Update references on a test case by replacing existing ones - :param case_id: ID of the test case - :param references: List of references to replace existing ones - :returns: Tuple with success status and error string - """ - # Deduplicate input references while preserving order - deduplicated_refs = [] - seen = set() - for ref in references: - ref_clean = ref.strip() - if ref_clean and ref_clean not in seen: - deduplicated_refs.append(ref_clean) - seen.add(ref_clean) - - # Join references - new_refs_string = ','.join(deduplicated_refs) - - # Validate total character limit - if len(new_refs_string) > 2000: - return False, f"Total references length ({len(new_refs_string)} characters) exceeds 2000 character limit" - - # Update the test case with new references - update_data = {'refs': new_refs_string} - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return self.reference_manager.update_case_references(case_id, references) def delete_case_references(self, case_id: int, specific_references: List[str] = None) -> Tuple[bool, str]: - """ - Delete all or specific references from a test case - :param case_id: ID of the test case - :param specific_references: List of specific references to delete (None to delete all) - :returns: Tuple with success status and error string - """ - if specific_references is None: - # Delete all references by setting refs to empty string - update_data = {'refs': ''} - else: - # First get the current test case to retrieve existing references - case_response = self.client.send_get(f"get_case/{case_id}") - if case_response.status_code != 200: - return False, f"Failed to retrieve test case {case_id}: {case_response.error_message}" - - case_data = case_response.response_text - existing_refs = case_data.get('refs', '') or '' - - if not existing_refs: - # No references to delete - return True, "" - - # Parse existing references - existing_ref_list = [ref.strip() for ref in existing_refs.split(',') if ref.strip()] - - # Deduplicate input references for efficient processing 
- refs_to_delete = set(ref.strip() for ref in specific_references if ref.strip()) - - # Remove specific references - remaining_refs = [ref for ref in existing_ref_list if ref not in refs_to_delete] - - # Join remaining references - new_refs_string = ','.join(remaining_refs) - update_data = {'refs': new_refs_string} - - # Update the test case - update_response = self.client.send_post(f"update_case/{case_id}", update_data) - - if update_response.status_code == 200: - return True, "" - else: - return False, update_response.error_message + return self.reference_manager.delete_case_references(case_id, specific_references) + + def update_case_automation_id(self, case_id: int, automation_id: str) -> Tuple[bool, str]: + return self.case_handler.update_case_automation_id(case_id, automation_id) + + def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: + return self.bdd_handler.add_bdd(section_id, feature_content) + + def get_bdd(self, case_id: int) -> Tuple[str, str]: + return self.bdd_handler.get_bdd(case_id) + + def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: + return self.bdd_handler.get_bdd_template_id(project_id) + + def add_case_bdd( + self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None + ) -> Tuple[int, str]: + return self.bdd_handler.add_case_bdd(section_id, title, bdd_content, template_id, tags) diff --git a/trcli/api/api_utils.py b/trcli/api/api_utils.py new file mode 100644 index 0000000..c42d80b --- /dev/null +++ b/trcli/api/api_utils.py @@ -0,0 +1,285 @@ +""" +API Utilities - Shared utilities for API handlers + +This module provides common utilities to reduce code duplication across handlers: +- Reference parsing and validation +- Response validation +- Type definitions for better type safety +""" + +from beartype.typing import List, Tuple, Optional, Literal +from typing_extensions import TypedDict + + +# ============================================================================ +# Type Definitions for Better Type Safety +# ============================================================================ + + +class TestRailResponse(TypedDict, total=False): + """Type definition for TestRail API responses""" + + id: int + name: str + title: str + suite_id: int + section_id: int + case_id: int + refs: str + error: str + + +# Literal types for strategy parameters +ReferenceStrategy = Literal["add", "update", "delete", "append", "replace"] + + +# ============================================================================ +# Reference Utilities +# ============================================================================ + + +def parse_references(refs_string: str) -> List[str]: + """ + Parse a comma-separated reference string into a list of cleaned references. + + Args: + refs_string: Comma-separated string of references (e.g., "REF-1, REF-2, REF-3") + + Returns: + List of cleaned, non-empty reference strings + + Example: + >>> parse_references("REF-1, , REF-2 ,REF-3") + ['REF-1', 'REF-2', 'REF-3'] + """ + if not refs_string: + return [] + return [ref.strip() for ref in refs_string.split(",") if ref.strip()] + + +def deduplicate_references(references: List[str]) -> List[str]: + """ + Deduplicate a list of references while preserving order. 
+ + Args: + references: List of reference strings + + Returns: + List of unique references in original order + + Example: + >>> deduplicate_references(['REF-1', 'REF-2', 'REF-1', 'REF-3']) + ['REF-1', 'REF-2', 'REF-3'] + """ + seen = set() + result = [] + for ref in references: + ref_clean = ref.strip() + if ref_clean and ref_clean not in seen: + result.append(ref_clean) + seen.add(ref_clean) + return result + + +def join_references(references: List[str]) -> str: + """ + Join a list of references into a comma-separated string. + + Args: + references: List of reference strings + + Returns: + Comma-separated string of references + + Example: + >>> join_references(['REF-1', 'REF-2', 'REF-3']) + 'REF-1,REF-2,REF-3' + """ + return ",".join(references) + + +def validate_references_length(refs_string: str, max_length: int) -> Tuple[bool, Optional[str]]: + """ + Validate that a reference string doesn't exceed the maximum length. + + Args: + refs_string: Comma-separated string of references + max_length: Maximum allowed length + + Returns: + Tuple of (is_valid, error_message) + - is_valid: True if length is valid, False otherwise + - error_message: None if valid, error description if invalid + + Example: + >>> validate_references_length("REF-1,REF-2", 2000) + (True, None) + >>> validate_references_length("X" * 2001, 2000) + (False, "Combined references length (2001 characters) exceeds 2000 character limit") + """ + length = len(refs_string) + if length > max_length: + return False, f"Combined references length ({length} characters) exceeds {max_length} character limit" + return True, None + + +def merge_references(existing_refs: str, new_refs: str, strategy: ReferenceStrategy = "add") -> str: + """ + Merge existing and new references based on the specified strategy. + + Args: + existing_refs: Current comma-separated references + new_refs: New comma-separated references to merge + strategy: How to merge references: + - 'add'/'append': Add new refs to existing, avoiding duplicates + - 'update'/'replace': Replace all existing refs with new refs + - 'delete': Remove specified refs from existing + + Returns: + Merged comma-separated reference string + + Examples: + >>> merge_references("REF-1,REF-2", "REF-3,REF-4", "add") + 'REF-1,REF-2,REF-3,REF-4' + >>> merge_references("REF-1,REF-2", "REF-3", "update") + 'REF-3' + >>> merge_references("REF-1,REF-2,REF-3", "REF-2", "delete") + 'REF-1,REF-3' + """ + if strategy in ("update", "replace"): + # Replace all references with new ones + return new_refs + + elif strategy == "delete": + if not new_refs: + # Delete all references + return "" + # Delete specific references + existing_list = parse_references(existing_refs) + refs_to_delete = set(parse_references(new_refs)) + remaining = [ref for ref in existing_list if ref not in refs_to_delete] + return join_references(remaining) + + else: # strategy in ('add', 'append') + # Add new references to existing ones, avoiding duplicates + if not existing_refs: + return new_refs + + existing_list = parse_references(existing_refs) + new_list = parse_references(new_refs) + + # Combine, avoiding duplicates while preserving order + combined = existing_list + [ref for ref in new_list if ref not in existing_list] + return join_references(combined) + + +def calculate_reference_changes(existing_refs: str, new_refs: str) -> Tuple[List[str], List[str]]: + """ + Calculate which references will be added and which are duplicates. 
+ + Args: + existing_refs: Current comma-separated references + new_refs: New comma-separated references to process + + Returns: + Tuple of (added_refs, skipped_refs) + - added_refs: References that will be newly added + - skipped_refs: References that already exist (duplicates) + + Example: + >>> calculate_reference_changes("REF-1,REF-2", "REF-2,REF-3") + (['REF-3'], ['REF-2']) + """ + existing_list = parse_references(existing_refs) + new_list = deduplicate_references(parse_references(new_refs)) + + added_refs = [ref for ref in new_list if ref not in existing_list] + skipped_refs = [ref for ref in new_list if ref in existing_list] + + return added_refs, skipped_refs + + +# ============================================================================ +# Response Validation Utilities +# ============================================================================ + + +def check_response_error(response, default_error_msg: str = "API request failed") -> Optional[str]: + """ + Check if a response contains an error and return the error message. + + Args: + response: API response object with error_message attribute + default_error_msg: Default message if error_message is empty + + Returns: + Error message string if error exists, None otherwise + + Example: + >>> response = MockResponse(error_message="Field not found") + >>> check_response_error(response) + 'Field not found' + """ + if hasattr(response, "error_message") and response.error_message: + return response.error_message + return None + + +def validate_response_field( + response_data: dict, field_name: str, error_prefix: str = "Response" +) -> Tuple[bool, Optional[str]]: + """ + Validate that a required field exists in the response data. + + Args: + response_data: Dictionary containing response data + field_name: Name of the required field + error_prefix: Prefix for error message + + Returns: + Tuple of (is_valid, error_message) + - is_valid: True if field exists, False otherwise + - error_message: None if valid, error description if invalid + + Example: + >>> validate_response_field({"id": 123, "name": "Test"}, "id") + (True, None) + >>> validate_response_field({"name": "Test"}, "id") + (False, "Response missing 'id' field") + """ + if field_name in response_data: + return True, None + return False, f"{error_prefix} missing '{field_name}' field" + + +# ============================================================================ +# Common Patterns +# ============================================================================ + + +def safe_get_nested(data: dict, *keys, default=None): + """ + Safely get a nested value from a dictionary. 
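+    Traversal stops and the default is returned as soon as a key is missing or an intermediate value is not a dict.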
+ + Args: + data: Dictionary to search + *keys: Sequence of keys to traverse + default: Default value if key path not found + + Returns: + Value at the key path, or default if not found + + Example: + >>> data = {"user": {"profile": {"name": "John"}}} + >>> safe_get_nested(data, "user", "profile", "name") + 'John' + >>> safe_get_nested(data, "user", "invalid", "key", default="N/A") + 'N/A' + """ + current = data + for key in keys: + if isinstance(current, dict) and key in current: + current = current[key] + else: + return default + return current diff --git a/trcli/api/bdd_handler.py b/trcli/api/bdd_handler.py new file mode 100644 index 0000000..6a3c49b --- /dev/null +++ b/trcli/api/bdd_handler.py @@ -0,0 +1,224 @@ +""" +BddHandler - Handles all BDD (Behavior-Driven Development) related operations for TestRail + +It manages all BDD operations including: +- Uploading .feature files +- Retrieving BDD test cases +- Getting BDD template IDs +- Creating BDD test cases +""" + +from beartype.typing import List, Tuple + +from trcli.api.api_client import APIClient +from trcli.cli import Environment + + +class BddHandler: + """Handles all BDD-related operations for TestRail""" + + def __init__(self, client: APIClient, environment: Environment): + """ + Initialize the BddHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + """ + self.client = client + self.environment = environment + + def add_bdd(self, section_id: int, feature_content: str) -> Tuple[List[int], str]: + """ + Upload .feature file to TestRail BDD endpoint + + Creates TestRail test case from Gherkin .feature content. + The Gherkin content is sent in the request body as plain text. + + Args: + section_id: TestRail section ID where test case will be created + feature_content: Raw .feature file content (Gherkin syntax) + + Returns: + Tuple of (case_ids, error_message) + - case_ids: List containing the created test case ID + - error_message: Empty string on success, error details on failure + """ + # Send Gherkin content as file upload (multipart/form-data) + # TestRail expects the .feature file as an attachment + self.environment.vlog(f"Uploading .feature file to add_bdd/{section_id}") + + files = {"attachment": ("feature.feature", feature_content, "text/plain")} + + response = self.client.send_post(f"add_bdd/{section_id}", payload=None, files=files) + + if response.status_code == 200: + # Response is a test case object with 'id' field + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + return [case_id], "" + else: + return [], "Response missing 'id' field" + else: + return [], "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to upload feature file (HTTP {response.status_code})" + return [], error_msg + + def get_bdd(self, case_id: int) -> Tuple[str, str]: + """ + Retrieve BDD test case as .feature file content + + Args: + case_id: TestRail test case ID + + Returns: + Tuple of (feature_content, error_message) + - feature_content: .feature file content (Gherkin syntax) + - error_message: Empty string on success, error details on failure + """ + self.environment.vlog(f"Retrieving BDD test case from get_bdd/{case_id}") + response = self.client.send_get(f"get_bdd/{case_id}") + + if response.status_code == 200: + # TestRail returns raw Gherkin text (not JSON) + # APIClient treats non-JSON as error and stores str(response.content) + if isinstance(response.response_text, dict): + # Some 
versions might return JSON with 'feature' field + feature_content = response.response_text.get("feature", "") + elif isinstance(response.response_text, str) and response.response_text.startswith("b'"): + # APIClient converted bytes to string representation: "b'text'" + # Need to extract the actual content + try: + # Remove b' prefix and ' suffix, then decode escape sequences + feature_content = response.response_text[2:-1].encode().decode("unicode_escape") + except (ValueError, AttributeError): + feature_content = response.response_text + else: + # Plain text response + feature_content = response.response_text + + return feature_content, "" + else: + error_msg = response.error_message or f"Failed to retrieve BDD test case (HTTP {response.status_code})" + return "", error_msg + + def get_bdd_template_id(self, project_id: int) -> Tuple[int, str]: + """ + Get the BDD template ID for a project + + Args: + project_id: TestRail project ID + + Returns: + Tuple of (template_id, error_message) + - template_id: BDD template ID if found, None otherwise + - error_message: Empty string on success, error details on failure + + API Endpoint: GET /api/v2/get_templates/{project_id} + """ + self.environment.vlog(f"Getting templates for project {project_id}") + response = self.client.send_get(f"get_templates/{project_id}") + + if response.status_code == 200: + templates = response.response_text + if isinstance(templates, list): + self.environment.vlog(f"Retrieved {len(templates)} template(s) from TestRail") + + # Log all available templates for debugging + if templates: + self.environment.vlog("Available templates:") + for template in templates: + template_id = template.get("id") + template_name = template.get("name", "") + self.environment.vlog(f" - ID {template_id}: '{template_name}'") + + # Look for BDD template by name + for template in templates: + template_name = template.get("name", "").strip() + template_name_lower = template_name.lower() + template_id = template.get("id") + + self.environment.vlog(f"Checking template '{template_name}' (ID: {template_id})") + self.environment.vlog(f" Lowercase: '{template_name_lower}'") + + # Check for BDD template (support both US and UK spellings) + if ( + "behavior" in template_name_lower + or "behaviour" in template_name_lower + or "bdd" in template_name_lower + ): + self.environment.vlog(f" ✓ MATCH: This is the BDD template!") + self.environment.log(f"Found BDD template: '{template_name}' (ID: {template_id})") + return template_id, "" + else: + self.environment.vlog(f" ✗ No match: Does not contain 'behavior', 'behaviour', or 'bdd'") + + # Build detailed error message with available templates + error_parts = ["BDD template not found. 
Please enable BDD template in TestRail project settings."] + if templates: + template_list = ", ".join([f"'{t.get('name', 'Unknown')}'" for t in templates]) + error_parts.append(f"Available templates: {template_list}") + error_parts.append("The BDD template name should contain 'behavior', 'behaviour', or 'bdd'.") + else: + error_parts.append("No templates are available in this project.") + + return None, "\n".join(error_parts) + else: + return None, "Unexpected response format from get_templates" + else: + error_msg = response.error_message or f"Failed to get templates (HTTP {response.status_code})" + return None, error_msg + + def add_case_bdd( + self, section_id: int, title: str, bdd_content: str, template_id: int, tags: List[str] = None + ) -> Tuple[int, str]: + """ + Create a BDD test case with Gherkin content + + Args: + section_id: TestRail section ID where test case will be created + title: Test case title (scenario name) + bdd_content: Gherkin scenario content + template_id: BDD template ID + tags: Optional list of tags (for refs field) + + Returns: + Tuple of (case_id, error_message) + - case_id: Created test case ID if successful, None otherwise + - error_message: Empty string on success, error details on failure + """ + self.environment.vlog(f"Creating BDD test case '{title}' in section {section_id}") + + # Build request body + # Note: custom_testrail_bdd_scenario expects an array of lines, not a single string + bdd_lines = bdd_content.split("\n") if bdd_content else [] + + body = { + "title": title, + "template_id": template_id, + "custom_testrail_bdd_scenario": bdd_lines, + } + + # Add tags as references if provided + if tags: + # Filter out @C tags (case IDs) and format others + ref_tags = [tag for tag in tags if not tag.upper().startswith("@C")] + if ref_tags: + body["refs"] = ", ".join(ref_tags) + + response = self.client.send_post(f"add_case/{section_id}", body) + + if response.status_code == 200: + if isinstance(response.response_text, dict): + case_id = response.response_text.get("id") + if case_id: + self.environment.vlog(f"Created BDD test case ID: {case_id}") + return case_id, "" + else: + return None, "Response missing 'id' field" + else: + return None, "Unexpected response format" + else: + error_msg = response.error_message or f"Failed to create BDD test case (HTTP {response.status_code})" + return None, error_msg diff --git a/trcli/api/case_handler.py b/trcli/api/case_handler.py new file mode 100644 index 0000000..4a6bf06 --- /dev/null +++ b/trcli/api/case_handler.py @@ -0,0 +1,217 @@ +""" +CaseHandler - Handles all test case-related operations for TestRail + +This class was extracted from ApiRequestHandler to follow the Single Responsibility Principle. 
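+Its public methods report failures through returned error strings, following the convention used across the API layer.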
+It manages all test case operations including: +- Adding test cases +- Updating case references +- Updating case automation IDs +- Deleting test cases +- Case helper operations +""" + +from concurrent.futures import ThreadPoolExecutor +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient, APIClientResult +from trcli.api.api_utils import ( + deduplicate_references, + join_references, + parse_references, + validate_references_length, +) +from trcli.cli import Environment +from trcli.constants import OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID +from trcli.data_classes.data_parsers import MatchersParser +from trcli.data_classes.dataclass_testrail import TestRailCase +from trcli.data_providers.api_data_provider import ApiDataProvider +from trcli.settings import MAX_WORKERS_ADD_CASE + + +class CaseHandler: + """Handles all test case-related operations for TestRail""" + + MAX_CASE_REFERENCES_LENGTH = 2000 # TestRail character limit for case refs field + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + handle_futures_callback, + retrieve_results_callback, + ): + """ + Initialize the CaseHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for case data + :param handle_futures_callback: Callback to handle concurrent futures + :param retrieve_results_callback: Callback to retrieve results after cancellation + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.handle_futures = handle_futures_callback + self.retrieve_results_after_cancelling = retrieve_results_callback + # Store active automation ID field (set by parent) + self._active_automation_id_field = None + + def add_cases(self) -> Tuple[List[dict], str]: + """ + Add cases that doesn't have ID in DataProvider. + Runs update_data in data_provider for successfully created resources. + + :returns: Tuple with list of dict created resources and error string. + """ + add_case_data = self.data_provider.add_cases() + responses = [] + error_message = "" + with self.environment.get_progress_bar( + results_amount=len(add_case_data), prefix="Adding test cases" + ) as progress_bar: + with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_CASE) as executor: + futures = { + executor.submit( + self._add_case_and_update_data, + body, + ): body + for body in add_case_data + } + responses, error_message = self.handle_futures( + futures=futures, action_string="add_case", progress_bar=progress_bar + ) + if error_message: + # When error_message is present we cannot be sure that responses contains all added items. 
+ # Iterate through futures to get all responses from done tasks (not cancelled) + responses = self.retrieve_results_after_cancelling(futures) + returned_resources = [ + { + "case_id": response.response_text["id"], + "section_id": response.response_text["section_id"], + "title": response.response_text["title"], + } + for response in responses + ] + return returned_resources, error_message + + def _add_case_and_update_data(self, case: TestRailCase) -> APIClientResult: + """ + Helper method to add a single case and update its data + + :param case: TestRailCase object to add + :returns: APIClientResult + """ + case_body = case.to_dict() + active_field = self._active_automation_id_field + if active_field == UPDATED_SYSTEM_NAME_AUTOMATION_ID and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: + case_body[UPDATED_SYSTEM_NAME_AUTOMATION_ID] = case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) + if self.environment.case_matcher != MatchersParser.AUTO and OLD_SYSTEM_NAME_AUTOMATION_ID in case_body: + case_body.pop(OLD_SYSTEM_NAME_AUTOMATION_ID) + response = self.client.send_post(f"add_case/{case_body.pop('section_id')}", case_body) + if response.status_code == 200: + case.case_id = response.response_text["id"] + case.result.case_id = response.response_text["id"] + case.section_id = response.response_text["section_id"] + return response + + def update_existing_case_references( + self, case_id: int, junit_refs: str, strategy: str = "append" + ) -> Tuple[bool, str, List[str], List[str]]: + """ + Update existing case references with values from JUnit properties. + + :param case_id: ID of the test case + :param junit_refs: References from JUnit testrail_case_field property + :param strategy: 'append' or 'replace' + :returns: Tuple with (success, error_message, added_refs, skipped_refs) + """ + if not junit_refs or not junit_refs.strip(): + return True, None, [], [] # No references to process + + # Parse and deduplicate JUnit references using utility function + junit_ref_list = deduplicate_references(parse_references(junit_refs)) + + if not junit_ref_list: + return False, "No valid references found in JUnit property", [], [] + + # Get current case data + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.error_message: + return False, case_response.error_message, [], [] + + existing_refs = case_response.response_text.get("refs", "") or "" + + if strategy == "replace": + # Replace strategy: use JUnit refs as-is + new_refs = join_references(junit_ref_list) + added_refs = junit_ref_list + skipped_refs = [] + else: + # Append strategy: combine with existing refs, avoiding duplicates + existing_ref_list = parse_references(existing_refs) + + # Determine which references are new vs duplicates + added_refs = [ref for ref in junit_ref_list if ref not in existing_ref_list] + skipped_refs = [ref for ref in junit_ref_list if ref in existing_ref_list] + + # If no new references to add, return current state + if not added_refs: + return True, None, added_refs, skipped_refs + + # Combine references + combined_list = existing_ref_list + added_refs + new_refs = join_references(combined_list) + + # Validate 2000 character limit for test case references + is_valid, error_msg = validate_references_length(new_refs, self.MAX_CASE_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg, [], [] + + # Update the case + update_data = {"refs": new_refs} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.error_message: + return False, 
update_response.error_message, [], [] + + return True, None, added_refs, skipped_refs + + def delete_cases(self, suite_id: int, added_cases: List[Dict]) -> Tuple[Dict, str]: + """ + Delete cases given add_cases response + + :param suite_id: suite id + :param added_cases: List of cases to delete + :returns: Tuple with dict created resources and error string. + """ + body = {"case_ids": [case["case_id"] for case in added_cases]} + response = self.client.send_post(f"delete_cases/{suite_id}", payload=body) + return response.response_text, response.error_message + + def update_case_automation_id(self, case_id: int, automation_id: str) -> Tuple[bool, str]: + """ + Update the automation_id field of a test case + + Args: + case_id: TestRail test case ID + automation_id: Automation ID value to set + + Returns: + Tuple of (success, error_message) + - success: True if update succeeded, False otherwise + - error_message: Empty string on success, error details on failure + """ + self.environment.vlog(f"Setting automation_id '{automation_id}' on case {case_id}") + + update_data = {"custom_automation_id": automation_id} + update_response = self.client.send_post(f"update_case/{case_id}", update_data) + + if update_response.status_code == 200: + return True, "" + else: + error_msg = ( + update_response.error_message or f"Failed to update automation_id (HTTP {update_response.status_code})" + ) + return False, error_msg diff --git a/trcli/api/case_matcher.py b/trcli/api/case_matcher.py new file mode 100644 index 0000000..801c5be --- /dev/null +++ b/trcli/api/case_matcher.py @@ -0,0 +1,249 @@ +""" +CaseMatcherFactory - Strategy pattern implementation for TestRail case matching + +Matching Strategies: +- AutomationIdMatcher: Matches cases by automation_id field +- NameMatcher: Matches cases by name (requires case_id in test data) +- PropertyMatcher: Matches cases by custom property (requires case_id in test data) +""" + +import html +from abc import ABC, abstractmethod +from beartype.typing import Tuple, List, Dict, Set + +from trcli.cli import Environment +from trcli.constants import OLD_SYSTEM_NAME_AUTOMATION_ID, UPDATED_SYSTEM_NAME_AUTOMATION_ID +from trcli.data_classes.data_parsers import MatchersParser +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class CaseMatcher(ABC): + """Abstract base class for case matching strategies""" + + def __init__(self, environment: Environment, data_provider: ApiDataProvider): + """ + Initialize the case matcher + + :param environment: Environment configuration + :param data_provider: Data provider for accessing test case data + """ + self.environment = environment + self.data_provider = data_provider + + @abstractmethod + def check_missing_cases( + self, + project_id: int, + suite_id: int, + suites_data: TestRailSuite, + get_all_cases_callback, + validate_case_ids_callback, + ) -> Tuple[bool, str]: + """ + Check for missing test cases using the specific matching strategy + + :param project_id: TestRail project ID + :param suite_id: TestRail suite ID + :param suites_data: Test suite data from provider + :param get_all_cases_callback: Callback to fetch all cases from TestRail + :param validate_case_ids_callback: Callback to validate case IDs exist + :returns: Tuple (has_missing_cases, error_message) + """ + pass + + +class AutomationIdMatcher(CaseMatcher): + """Matches test cases by automation_id field""" + + def check_missing_cases( + self, + project_id: int, + suite_id: int, + 
suites_data: TestRailSuite, + get_all_cases_callback, + validate_case_ids_callback, + ) -> Tuple[bool, str]: + """ + Match cases using automation_id field + + :param project_id: TestRail project ID + :param suite_id: TestRail suite ID + :param suites_data: Test suite data from provider + :param get_all_cases_callback: Callback to fetch all cases from TestRail + :param validate_case_ids_callback: Callback to validate case IDs exist + :returns: Tuple (has_missing_cases, error_message) + """ + missing_cases_number = 0 + + # Fetch all cases from TestRail + returned_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return False, error_message + + # Build lookup dictionary: automation_id -> case data + test_cases_by_aut_id = {} + for case in returned_cases: + aut_case_id = case.get(OLD_SYSTEM_NAME_AUTOMATION_ID) or case.get(UPDATED_SYSTEM_NAME_AUTOMATION_ID) + if aut_case_id: + aut_case_id = html.unescape(aut_case_id) + test_cases_by_aut_id[aut_case_id] = case + + # Match test cases from report with TestRail cases + test_case_data = [] + for section in suites_data.testsections: + for test_case in section.testcases: + aut_id = test_case.custom_automation_id + if aut_id in test_cases_by_aut_id.keys(): + case = test_cases_by_aut_id[aut_id] + test_case_data.append( + { + "case_id": case["id"], + "section_id": case["section_id"], + "title": case["title"], + OLD_SYSTEM_NAME_AUTOMATION_ID: aut_id, + } + ) + else: + missing_cases_number += 1 + + # Update data provider with matched cases + self.data_provider.update_data(case_data=test_case_data) + + if missing_cases_number: + self.environment.log(f"Found {missing_cases_number} test cases not matching any TestRail case.") + + return missing_cases_number > 0, "" + + +class IdBasedMatcher(CaseMatcher): + """Base class for matchers that rely on case_id being present in test data (NAME, PROPERTY)""" + + def check_missing_cases( + self, + project_id: int, + suite_id: int, + suites_data: TestRailSuite, + get_all_cases_callback, + validate_case_ids_callback, + ) -> Tuple[bool, str]: + """ + Validate that case IDs exist in TestRail + + For NAME/PROPERTY matchers, the test data must already contain case_id. + This method validates those IDs exist in TestRail. 
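+        The validation strategy depends on report size: reports with 1000 or more cases either skip validation (when every test already has an ID) or fetch all cases once and check locally, while smaller reports validate the IDs individually.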
+ + :param project_id: TestRail project ID + :param suite_id: TestRail suite ID + :param suites_data: Test suite data from provider + :param get_all_cases_callback: Callback to fetch all cases from TestRail + :param validate_case_ids_callback: Callback to validate case IDs exist + :returns: Tuple (has_missing_cases, error_message) + """ + missing_cases_number = 0 + nonexistent_ids = [] + case_ids_to_validate = set() + + # Collect all unique case IDs that need validation + for section in suites_data.testsections: + for test_case in section.testcases: + if not test_case.case_id: + missing_cases_number += 1 + else: + case_ids_to_validate.add(int(test_case.case_id)) + + total_tests_in_report = missing_cases_number + len(case_ids_to_validate) + + if missing_cases_number: + self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") + + # Smart validation strategy based on report size + # Threshold: 1000 cases (same as skip validation threshold for consistency) + if case_ids_to_validate: + # Skip validation for large reports with all IDs (most efficient) + if missing_cases_number == 0 and total_tests_in_report >= 1000: + # All tests have IDs and report is large: Skip validation (trust IDs) + self.environment.log( + f"Skipping validation of {len(case_ids_to_validate)} case IDs " + f"(all tests have IDs, trusting they exist). " + f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." + ) + nonexistent_ids = [] + + # Fetch all for large reports with missing IDs + elif total_tests_in_report >= 1000: + # Large report (>=1000 cases) with some missing IDs: Fetch all cases and validate locally + # This is more efficient than individual validation for large batches + self.environment.log( + f"Large report detected ({total_tests_in_report} cases). " + f"Fetching all cases from TestRail for efficient validation..." + ) + returned_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return False, error_message + + # Build lookup dictionary from fetched cases + all_case_ids = {case["id"] for case in returned_cases} + + # Validate locally (O(1) lookup) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in all_case_ids] + + if nonexistent_ids: + self.environment.elog( + f"Nonexistent case IDs found in the report file: {nonexistent_ids[:20]}" + f"{' ...' if len(nonexistent_ids) > 20 else ''}" + ) + return False, "Case IDs not in TestRail project or suite were detected in the report file." + + # Individual validation for small reports + else: + # Small report (<1000 cases): Use individual validation + # This is more efficient for small batches + self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") + validated_ids = validate_case_ids_callback(suite_id, list(case_ids_to_validate)) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] + + if nonexistent_ids: + self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") + return False, "Case IDs not in TestRail project or suite were detected in the report file." 
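+        # No nonexistent IDs were detected (or validation was skipped); report whether any tests in the report lacked a case ID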
+ + return missing_cases_number > 0, "" + + +class NameMatcher(IdBasedMatcher): + """Matches test cases by name (case_id must be present in test data)""" + + pass + + +class PropertyMatcher(IdBasedMatcher): + """Matches test cases by custom property (case_id must be present in test data)""" + + pass + + +class CaseMatcherFactory: + """Factory for creating appropriate case matcher based on configuration""" + + @staticmethod + def create_matcher( + matcher_type: MatchersParser, environment: Environment, data_provider: ApiDataProvider + ) -> CaseMatcher: + """ + Create the appropriate case matcher based on the matcher type + + :param matcher_type: Type of matcher to create (AUTO, NAME, PROPERTY). If None, defaults to AUTO. + :param environment: Environment configuration + :param data_provider: Data provider for accessing test case data + :returns: Concrete CaseMatcher instance + :raises ValueError: If matcher_type is not recognized + """ + # Default to AUTO if matcher_type is None (e.g., for parse_openapi command) + if matcher_type is None or matcher_type == MatchersParser.AUTO: + return AutomationIdMatcher(environment, data_provider) + elif matcher_type == MatchersParser.NAME: + return NameMatcher(environment, data_provider) + elif matcher_type == MatchersParser.PROPERTY: + return PropertyMatcher(environment, data_provider) + else: + raise ValueError(f"Unknown matcher type: {matcher_type}") diff --git a/trcli/api/label_manager.py b/trcli/api/label_manager.py new file mode 100644 index 0000000..e6f444b --- /dev/null +++ b/trcli/api/label_manager.py @@ -0,0 +1,644 @@ +""" +LabelManager - Handles all label-related operations for TestRail + +It manages all label operations including: +- Creating, retrieving, updating, and deleting labels +- Adding labels to test cases and tests +- Filtering cases and tests by labels +- Retrieving labels for specific tests +""" + +from beartype.typing import List, Union, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment + + +class LabelManager: + """Handles all label-related operations for TestRail""" + + MAX_LABELS_PER_ENTITY = 10 # TestRail limit + MAX_LABEL_TITLE_LENGTH = 20 # TestRail limit + + def __init__(self, client: APIClient, environment: Environment): + """ + Initialize the LabelManager + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + """ + self.client = client + self.environment = environment + + def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: + """ + Add a new label to the project + + :param project_id: ID of the project + :param title: Title of the label (max 20 characters) + :returns: Tuple with created label data and error string + """ + payload = {"title": title} + response = self.client.send_post(f"add_label/{project_id}", payload=payload) + return response.response_text, response.error_message + + def update_label(self, label_id: int, project_id: int, title: str) -> Tuple[dict, str]: + """ + Update an existing label + + :param label_id: ID of the label to update + :param project_id: ID of the project + :param title: New title for the label (max 20 characters) + :returns: Tuple with updated label data and error string + """ + payload = {"project_id": project_id, "title": title} + response = self.client.send_post(f"update_label/{label_id}", payload=payload) + return response.response_text, response.error_message + + def get_label(self, label_id: int) -> Tuple[dict, str]: + """ + Get a specific label by ID + + :param 
label_id: ID of the label to retrieve + :returns: Tuple with label data and error string + """ + response = self.client.send_get(f"get_label/{label_id}") + return response.response_text, response.error_message + + def get_labels(self, project_id: int, offset: int = 0, limit: int = 250) -> Tuple[dict, str]: + """ + Get all labels for a project with pagination + + :param project_id: ID of the project + :param offset: Offset for pagination + :param limit: Limit for pagination + :returns: Tuple with labels data (including pagination info) and error string + """ + params = [] + if offset > 0: + params.append(f"offset={offset}") + if limit != 250: + params.append(f"limit={limit}") + + url = f"get_labels/{project_id}" + if params: + url += "&" + "&".join(params) + + response = self.client.send_get(url) + return response.response_text, response.error_message + + def delete_label(self, label_id: int) -> Tuple[bool, str]: + """ + Delete a single label + + :param label_id: ID of the label to delete + :returns: Tuple with success status and error string + """ + response = self.client.send_post(f"delete_label/{label_id}") + success = response.status_code == 200 + return success, response.error_message + + def delete_labels(self, label_ids: List[int]) -> Tuple[bool, str]: + """ + Delete multiple labels + + :param label_ids: List of label IDs to delete + :returns: Tuple with success status and error string + """ + payload = {"label_ids": label_ids} + response = self.client.send_post("delete_labels", payload=payload) + success = response.status_code == 200 + return success, response.error_message + + def add_labels_to_cases( + self, + case_ids: List[int], + title: str, + project_id: int, + suite_id: int = None, + get_all_cases_callback=None, + ) -> Tuple[dict, str]: + """ + Add a label to multiple test cases + + :param case_ids: List of test case IDs + :param title: Label title (max 20 characters) + :param project_id: Project ID for validation + :param suite_id: Suite ID (optional) + :param get_all_cases_callback: Callback function to get all cases (injected dependency) + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = {"successful_cases": [], "failed_cases": [], "max_labels_reached": [], "case_not_found": []} + + # Check if project is multi-suite by getting all cases without suite_id + all_cases_no_suite, error_message = get_all_cases_callback(project_id, None) + if error_message: + return results, error_message + + # Check if project has multiple suites + suite_ids = set() + for case in all_cases_no_suite: + if "suite_id" in case and case["suite_id"]: + suite_ids.add(case["suite_id"]) + + # If project has multiple suites and no suite_id provided, require it + if len(suite_ids) > 1 and suite_id is None: + return results, "This project is multisuite, suite id is required" + + # Get all cases to validate that the provided case IDs exist + all_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return results, error_message + + # Create a set of existing case IDs for quick lookup + existing_case_ids = {case["id"] for case in all_cases} + + # Validate case IDs and separate valid from invalid ones + invalid_case_ids = [case_id for case_id in case_ids if case_id not in existing_case_ids] + valid_case_ids = [case_id for case_id in case_ids if case_id in existing_case_ids] + + # Record invalid case IDs + for case_id in invalid_case_ids: + results["case_not_found"].append(case_id) + + # If no valid case IDs, return early 
+ if not valid_case_ids: + return results, "" + + # Check if label exists or create it + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Find existing label with the same title + label_id = None + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + + # Collect case data and validate constraints + cases_to_update = [] + for case_id in valid_case_ids: + # Get current case to check existing labels + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + results["failed_cases"].append( + {"case_id": case_id, "error": f"Could not retrieve case {case_id}: {case_response.error_message}"} + ) + continue + + case_data = case_response.response_text + current_labels = case_data.get("labels", []) + + # Check if label already exists on this case + if any(label.get("id") == label_id for label in current_labels): + results["successful_cases"].append( + {"case_id": case_id, "message": f"Label '{title}' already exists on case {case_id}"} + ) + continue + + # Check maximum labels limit + if len(current_labels) >= self.MAX_LABELS_PER_ENTITY: + results["max_labels_reached"].append(case_id) + continue + + # Prepare case for update + existing_label_ids = [label.get("id") for label in current_labels if label.get("id")] + updated_label_ids = existing_label_ids + [label_id] + cases_to_update.append({"case_id": case_id, "labels": updated_label_ids}) + + # Update cases using appropriate endpoint + if len(cases_to_update) == 1: + # Single case: use update_case/{case_id} + case_info = cases_to_update[0] + case_update_data = {"labels": case_info["labels"]} + + update_response = self.client.send_post(f"update_case/{case_info['case_id']}", payload=case_update_data) + + if update_response.status_code == 200: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + elif len(cases_to_update) > 1: + # Multiple cases: use update_cases/{suite_id} + # Need to determine suite_id from the cases + case_suite_id = suite_id + if not case_suite_id: + # Get suite_id from the first case if not provided + first_case = all_cases[0] if all_cases else None + case_suite_id = first_case.get("suite_id") if first_case else None + + if not case_suite_id: + # Fall back to individual updates if no suite_id available + for case_info in cases_to_update: + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + + if update_response.status_code == 200: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + else: + # Batch update using update_cases/{suite_id} + batch_update_data = { + "case_ids": [case_info["case_id"] for case_info in 
cases_to_update], + "labels": cases_to_update[0]["labels"], # Assuming same labels for all cases + } + + batch_response = self.client.send_post(f"update_cases/{case_suite_id}", payload=batch_update_data) + + if batch_response.status_code == 200: + for case_info in cases_to_update: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + # If batch update fails, fall back to individual updates + for case_info in cases_to_update: + case_update_data = {"labels": case_info["labels"]} + update_response = self.client.send_post( + f"update_case/{case_info['case_id']}", payload=case_update_data + ) + + if update_response.status_code == 200: + results["successful_cases"].append( + { + "case_id": case_info["case_id"], + "message": f"Successfully added label '{title}' to case {case_info['case_id']}", + } + ) + else: + results["failed_cases"].append( + {"case_id": case_info["case_id"], "error": update_response.error_message} + ) + + return results, "" + + def get_cases_by_label( + self, + project_id: int, + suite_id: int = None, + label_ids: List[int] = None, + label_title: str = None, + get_all_cases_callback=None, + ) -> Tuple[List[dict], str]: + """ + Get test cases filtered by label ID or title + + :param project_id: Project ID + :param suite_id: Suite ID (optional) + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :param get_all_cases_callback: Callback function to get all cases (injected dependency) + :returns: Tuple with list of matching cases and error string + """ + # Get all cases first + all_cases, error_message = get_all_cases_callback(project_id, suite_id) + if error_message: + return [], error_message + + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Filter cases that have any of the target labels + matching_cases = [] + for case in all_cases: + case_labels = case.get("labels", []) + case_label_ids = [label.get("id") for label in case_labels] + + # Check if any of the target label IDs are present in this case + if any(label_id in case_label_ids for label_id in target_label_ids): + matching_cases.append(case) + + return matching_cases, "" + + def add_labels_to_tests( + self, test_ids: List[int], titles: Union[str, List[str]], project_id: int + ) -> Tuple[dict, str]: + """ + Add labels to multiple tests + + :param test_ids: List of test IDs + :param titles: Label title(s) - can be a single string or list of strings (max 20 characters each) + :param project_id: Project ID for validation + :returns: Tuple with response data and error string + """ + # Initialize results structure + results = {"successful_tests": [], "failed_tests": [], "max_labels_reached": [], "test_not_found": []} + + # Normalize titles to a list + if isinstance(titles, str): + title_list = [titles] + else: + title_list = titles + + # At this point, title_list should already be validated by the CLI + # Just ensure we have clean titles + title_list = [title.strip() for title in title_list if title.strip()] + + if not title_list: + return {}, "No 
valid labels provided" + + # Validate test IDs by getting run information for each test + valid_test_ids = [] + for test_id in test_ids: + # Get test information to validate it exists + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results["test_not_found"].append(test_id) + continue + + test_data = test_response.response_text + # Validate that the test belongs to the correct project + run_id = test_data.get("run_id") + if run_id: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + run_data = run_response.response_text + if run_data.get("project_id") == project_id: + valid_test_ids.append(test_id) + else: + results["test_not_found"].append(test_id) + else: + results["test_not_found"].append(test_id) + else: + results["test_not_found"].append(test_id) + + # If no valid test IDs, return early + if not valid_test_ids: + return results, "" + + # Check if labels exist or create them + existing_labels, error_message = self.get_labels(project_id) + if error_message: + return results, error_message + + # Process each title to get/create label IDs + label_ids = [] + label_id_to_title = {} # Map label IDs to their titles + for title in title_list: + # Find existing label with the same title + label_id = None + for label in existing_labels.get("labels", []): + if label.get("title") == title: + label_id = label.get("id") + break + + # Create label if it doesn't exist + if label_id is None: + label_data, error_message = self.add_label(project_id, title) + if error_message: + return results, error_message + label_info = label_data.get("label", label_data) + label_id = label_info.get("id") + + if label_id: + label_ids.append(label_id) + label_id_to_title[label_id] = title + + # Collect test data and validate constraints + tests_to_update = [] + for test_id in valid_test_ids: + # Get current test to check existing labels + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results["failed_tests"].append( + {"test_id": test_id, "error": f"Could not retrieve test {test_id}: {test_response.error_message}"} + ) + continue + + test_data = test_response.response_text + current_labels = test_data.get("labels", []) + current_label_ids = [label.get("id") for label in current_labels if label.get("id")] + + new_label_ids = [] + already_exists_titles = [] + + for label_id in label_ids: + if label_id not in current_label_ids: + new_label_ids.append(label_id) + else: + if label_id in label_id_to_title: + already_exists_titles.append(label_id_to_title[label_id]) + + if not new_label_ids: + results["successful_tests"].append( + { + "test_id": test_id, + "message": f"All labels already exist on test {test_id}: {', '.join(already_exists_titles)}", + } + ) + continue + + # Check maximum labels limit + if len(current_label_ids) + len(new_label_ids) > self.MAX_LABELS_PER_ENTITY: + results["max_labels_reached"].append(test_id) + continue + + # Prepare test for update + updated_label_ids = current_label_ids + new_label_ids + + new_label_titles = [] + for label_id in new_label_ids: + if label_id in label_id_to_title: + new_label_titles.append(label_id_to_title[label_id]) + + tests_to_update.append( + { + "test_id": test_id, + "labels": updated_label_ids, + "new_labels": new_label_ids, + "new_label_titles": new_label_titles, + } + ) + + # Update tests using appropriate endpoint + if len(tests_to_update) == 1: + # Single test: use update_test/{test_id} + test_info = 
tests_to_update[0] + test_update_data = {"labels": test_info["labels"]} + + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get("new_label_titles", []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) + else: + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + else: + # Multiple tests: use individual updates to ensure each test gets its specific labels + for test_info in tests_to_update: + test_update_data = {"labels": test_info["labels"]} + update_response = self.client.send_post(f"update_test/{test_info['test_id']}", payload=test_update_data) + + if update_response.status_code == 200: + new_label_titles = test_info.get("new_label_titles", []) + new_label_count = len(new_label_titles) + + if new_label_count == 1: + message = f"Successfully added label '{new_label_titles[0]}' to test {test_info['test_id']}" + elif new_label_count > 1: + message = f"Successfully added {new_label_count} labels ({', '.join(new_label_titles)}) to test {test_info['test_id']}" + else: + message = f"No new labels added to test {test_info['test_id']}" + + results["successful_tests"].append({"test_id": test_info["test_id"], "message": message}) + else: + results["failed_tests"].append( + {"test_id": test_info["test_id"], "error": update_response.error_message} + ) + + return results, "" + + def get_tests_by_label( + self, project_id: int, label_ids: List[int] = None, label_title: str = None, run_ids: List[int] = None + ) -> Tuple[List[dict], str]: + """ + Get tests filtered by label ID or title from specific runs + + :param project_id: Project ID + :param label_ids: List of label IDs to filter by + :param label_title: Label title to filter by + :param run_ids: List of run IDs to filter tests from (optional, defaults to all runs) + :returns: Tuple with list of matching tests and error string + """ + # If filtering by title, first get the label ID + target_label_ids = label_ids or [] + if label_title and not target_label_ids: + labels_data, error_message = self.get_labels(project_id) + if error_message: + return [], error_message + + for label in labels_data.get("labels", []): + if label.get("title") == label_title: + target_label_ids.append(label.get("id")) + + if not target_label_ids: + return [], "" # No label found is a valid case with 0 results + + # Get runs for the project (either all runs or specific run IDs) + if run_ids: + # Use specific run IDs - validate they exist by getting run details + runs = [] + for run_id in run_ids: + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.status_code == 200: + runs.append(run_response.response_text) + else: + return [], f"Run ID {run_id} not found or inaccessible" + else: + # Get all runs for the project + runs_response = self.client.send_get(f"get_runs/{project_id}") + if runs_response.status_code != 200: + return [], runs_response.error_message + + runs_data = runs_response.response_text + runs = runs_data.get("runs", []) if 
isinstance(runs_data, dict) else runs_data + + # Collect all tests from all runs + matching_tests = [] + for run in runs: + run_id = run.get("id") + if not run_id: + continue + + # Get tests for this run + tests_response = self.client.send_get(f"get_tests/{run_id}") + if tests_response.status_code != 200: + continue # Skip this run if we can't get tests + + tests_data = tests_response.response_text + tests = tests_data.get("tests", []) if isinstance(tests_data, dict) else tests_data + + # Filter tests that have any of the target labels + for test in tests: + test_labels = test.get("labels", []) + test_label_ids = [label.get("id") for label in test_labels] + + # Check if any of the target label IDs are present in this test + if any(label_id in test_label_ids for label_id in target_label_ids): + matching_tests.append(test) + + return matching_tests, "" + + def get_test_labels(self, test_ids: List[int]) -> Tuple[List[dict], str]: + """ + Get labels for specific tests + + :param test_ids: List of test IDs to get labels for + :returns: Tuple with list of test label information and error string + """ + results = [] + + for test_id in test_ids: + # Get test information + test_response = self.client.send_get(f"get_test/{test_id}") + if test_response.status_code != 200: + results.append({"test_id": test_id, "error": f"Test {test_id} not found or inaccessible", "labels": []}) + continue + + test_data = test_response.response_text + test_labels = test_data.get("labels", []) + + results.append( + { + "test_id": test_id, + "title": test_data.get("title", "Unknown"), + "status_id": test_data.get("status_id"), + "labels": test_labels, + "error": None, + } + ) + + return results, "" diff --git a/trcli/api/reference_manager.py b/trcli/api/reference_manager.py new file mode 100644 index 0000000..19c4e26 --- /dev/null +++ b/trcli/api/reference_manager.py @@ -0,0 +1,134 @@ +""" +ReferenceManager - Handles all reference-related operations for TestRail test cases + +It manages all reference operations including: +- Adding references to test cases +- Updating references on test cases +- Deleting references from test cases +""" + +from beartype.typing import List, Tuple, Optional + +from trcli.api.api_client import APIClient +from trcli.api.api_utils import ( + deduplicate_references, + join_references, + merge_references, + validate_references_length, + check_response_error, +) +from trcli.cli import Environment + + +class ReferenceManager: + """Handles all reference-related operations for TestRail test cases""" + + MAX_REFERENCES_LENGTH = 2000 # TestRail character limit for refs field + + def __init__(self, client: APIClient, environment: Environment): + """ + Initialize the ReferenceManager + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + """ + self.client = client + self.environment = environment + + def add_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + """ + Add references to a test case (appends to existing references) + + :param case_id: ID of the test case + :param references: List of references to add + :returns: Tuple with success status and error string + """ + # Get current test case to retrieve existing references + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + error = check_response_error(case_response) + return False, ( + f"Failed to retrieve test case {case_id}: {error}" + if error + else f"Failed to retrieve test case {case_id}" + ) + + 
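+        # Normalise a missing/None refs value to an empty string before merging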
existing_refs = case_response.response_text.get("refs", "") or "" + + # Deduplicate and merge with existing references + deduplicated_input = deduplicate_references(references) + new_refs_string = merge_references(existing_refs, join_references(deduplicated_input), strategy="add") + + # Validate total character limit + is_valid, error_msg = validate_references_length(new_refs_string, self.MAX_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg + + # Update the test case with new references + update_response = self.client.send_post(f"update_case/{case_id}", {"refs": new_refs_string}) + + if update_response.status_code == 200: + return True, "" + return False, update_response.error_message or "Failed to update references" + + def update_case_references(self, case_id: int, references: List[str]) -> Tuple[bool, str]: + """ + Update references on a test case by replacing existing ones + + :param case_id: ID of the test case + :param references: List of references to replace existing ones + :returns: Tuple with success status and error string + """ + # Deduplicate and join references + deduplicated_refs = deduplicate_references(references) + new_refs_string = join_references(deduplicated_refs) + + # Validate total character limit + is_valid, error_msg = validate_references_length(new_refs_string, self.MAX_REFERENCES_LENGTH) + if not is_valid: + return False, error_msg + + # Update the test case with new references + update_response = self.client.send_post(f"update_case/{case_id}", {"refs": new_refs_string}) + + if update_response.status_code == 200: + return True, "" + return False, update_response.error_message or "Failed to update references" + + def delete_case_references(self, case_id: int, specific_references: Optional[List[str]] = None) -> Tuple[bool, str]: + """ + Delete all or specific references from a test case + + :param case_id: ID of the test case + :param specific_references: List of specific references to delete (None to delete all) + :returns: Tuple with success status and error string + """ + if specific_references is None: + # Delete all references by setting refs to empty string + new_refs_string = "" + else: + # Get current test case to retrieve existing references + case_response = self.client.send_get(f"get_case/{case_id}") + if case_response.status_code != 200: + error = check_response_error(case_response) + return False, ( + f"Failed to retrieve test case {case_id}: {error}" + if error + else f"Failed to retrieve test case {case_id}" + ) + + existing_refs = case_response.response_text.get("refs", "") or "" + + if not existing_refs: + # No references to delete + return True, "" + + # Use utility to delete specific references + new_refs_string = merge_references(existing_refs, join_references(specific_references), strategy="delete") + + # Update the test case + update_response = self.client.send_post(f"update_case/{case_id}", {"refs": new_refs_string}) + + if update_response.status_code == 200: + return True, "" + return False, update_response.error_message or "Failed to delete references" diff --git a/trcli/api/result_handler.py b/trcli/api/result_handler.py new file mode 100644 index 0000000..7317614 --- /dev/null +++ b/trcli/api/result_handler.py @@ -0,0 +1,178 @@ +""" +ResultHandler - Handles all test result-related operations for TestRail + +It manages all test result operations including: +- Adding test results +- Uploading attachments to results +- Retrieving results after cancellation +""" + +import os +from concurrent.futures import ThreadPoolExecutor, 
as_completed +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_providers.api_data_provider import ApiDataProvider +from trcli.settings import MAX_WORKERS_ADD_RESULTS + + +class ResultHandler: + """Handles all test result-related operations for TestRail""" + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_tests_in_run_callback, + handle_futures_callback, + ): + """ + Initialize the ResultHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for result data + :param get_all_tests_in_run_callback: Callback to fetch all tests in a run + :param handle_futures_callback: Callback to handle concurrent futures + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_tests_in_run = get_all_tests_in_run_callback + self.handle_futures = handle_futures_callback + + def upload_attachments(self, report_results: List[Dict], results: List[Dict], run_id: int): + """ + Getting test result id and upload attachments for it. + + :param report_results: List of test results with attachments from report + :param results: List of created results from TestRail + :param run_id: Run ID + """ + tests_in_run, error = self.__get_all_tests_in_run(run_id) + if not error: + failed_uploads = [] + for report_result in report_results: + case_id = report_result["case_id"] + test_id = next((test["id"] for test in tests_in_run if test["case_id"] == case_id), None) + result_id = next((result["id"] for result in results if result["test_id"] == test_id), None) + for file_path in report_result.get("attachments"): + try: + with open(file_path, "rb") as file: + response = self.client.send_post( + f"add_attachment_to_result/{result_id}", files={"attachment": file} + ) + + # Check if upload was successful + if response.status_code != 200: + file_name = os.path.basename(file_path) + + # Handle 413 Request Entity Too Large specifically + if response.status_code == 413: + error_msg = FAULT_MAPPING["attachment_too_large"].format( + file_name=file_name, case_id=case_id + ) + self.environment.elog(error_msg) + failed_uploads.append(f"{file_name} (case {case_id})") + else: + # Handle other HTTP errors + error_msg = FAULT_MAPPING["attachment_upload_failed"].format( + file_path=file_name, + case_id=case_id, + error_message=response.error_message or f"HTTP {response.status_code}", + ) + self.environment.elog(error_msg) + failed_uploads.append(f"{file_name} (case {case_id})") + except FileNotFoundError: + self.environment.elog(f"Attachment file not found: {file_path} (case {case_id})") + failed_uploads.append(f"{file_path} (case {case_id})") + except Exception as ex: + file_name = os.path.basename(file_path) if os.path.exists(file_path) else file_path + self.environment.elog(f"Error uploading attachment '{file_name}' for case {case_id}: {ex}") + failed_uploads.append(f"{file_name} (case {case_id})") + + # Provide a summary if there were failed uploads + if failed_uploads: + self.environment.log(f"\nWarning: {len(failed_uploads)} attachment(s) failed to upload.") + else: + self.environment.elog(f"Unable to upload attachments due to API request error: {error}") + + def add_results(self, run_id: int) -> Tuple[List, str, int]: + """ + Adds one or more new test results. 
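+        Results are sent in chunks of environment.batch_size through a thread pool, and any attachments referenced by the results are uploaded once the results have been created.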
+ + :param run_id: run id + :returns: Tuple with dict created resources, error string, and results count. + """ + responses = [] + error_message = "" + # Get pre-validated user IDs if available + user_ids = getattr(self.environment, "_validated_user_ids", []) + + add_results_data_chunks = self.data_provider.add_results_for_cases(self.environment.batch_size, user_ids) + # Get assigned count from data provider + assigned_count = getattr(self.data_provider, "_assigned_count", 0) + + results_amount = sum([len(results["results"]) for results in add_results_data_chunks]) + + with self.environment.get_progress_bar(results_amount=results_amount, prefix="Adding results") as progress_bar: + with ThreadPoolExecutor(max_workers=MAX_WORKERS_ADD_RESULTS) as executor: + futures = { + executor.submit(self.client.send_post, f"add_results_for_cases/{run_id}", body): body + for body in add_results_data_chunks + } + responses, error_message = self.handle_futures( + futures=futures, + action_string="add_results", + progress_bar=progress_bar, + ) + if error_message: + # When error_message is present we cannot be sure that responses contains all added items. + # Iterate through futures to get all responses from done tasks (not cancelled) + responses = ResultHandler.retrieve_results_after_cancelling(futures) + responses = [response.response_text for response in responses] + results = [result for results_list in responses for result in results_list] + report_results_w_attachments = [] + for results_data_chunk in add_results_data_chunks: + for test_result in results_data_chunk["results"]: + if test_result["attachments"]: + report_results_w_attachments.append(test_result) + if report_results_w_attachments: + attachments_count = 0 + for result in report_results_w_attachments: + attachments_count += len(result["attachments"]) + self.environment.log( + f"Uploading {attachments_count} attachments " f"for {len(report_results_w_attachments)} test results." + ) + self.upload_attachments(report_results_w_attachments, results, run_id) + else: + self.environment.log(f"No attachments found to upload.") + + # Log assignment results if assignment was performed + if user_ids: + total_failed = getattr(self.data_provider, "_total_failed_count", assigned_count) + if assigned_count > 0: + self.environment.log(f"Assigning failed results: {assigned_count}/{total_failed}, Done.") + else: + self.environment.log(f"Assigning failed results: 0/0, Done.") + + return responses, error_message, progress_bar.n + + @staticmethod + def retrieve_results_after_cancelling(futures) -> list: + """ + Retrieve results from futures after cancellation has been triggered. 
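+ Only futures that finished without being cancelled and whose responses carry no
+ error_message are collected.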
+ + :param futures: Dictionary of futures + :returns: List of successful responses + """ + responses = [] + for future in as_completed(futures): + if not future.cancelled(): + response = future.result() + if not response.error_message: + responses.append(response) + return responses diff --git a/trcli/api/run_handler.py b/trcli/api/run_handler.py new file mode 100644 index 0000000..b735315 --- /dev/null +++ b/trcli/api/run_handler.py @@ -0,0 +1,292 @@ +""" +RunHandler - Handles all test run-related operations for TestRail + +It manages all test run operations including: +- Creating test runs +- Updating test runs +- Managing run references +- Closing and deleting runs +""" + +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.api.api_utils import ( + deduplicate_references, + join_references, + merge_references, + parse_references, + validate_references_length, +) +from trcli.cli import Environment +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class RunHandler: + """Handles all test run-related operations for TestRail""" + + MAX_RUN_REFERENCES_LENGTH = 250 # TestRail character limit for run refs field + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_tests_in_run_callback, + ): + """ + Initialize the RunHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for run data + :param get_all_tests_in_run_callback: Callback to fetch all tests in a run + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_tests_in_run = get_all_tests_in_run_callback + + def add_run( + self, + project_id: int, + run_name: str, + milestone_id: int = None, + start_date: str = None, + end_date: str = None, + plan_id: int = None, + config_ids: List[int] = None, + assigned_to_id: int = None, + include_all: bool = False, + refs: str = None, + case_ids: List[int] = None, + ) -> Tuple[int, str]: + """ + Creates a new test run. + + :param project_id: project_id + :param run_name: run name + :param milestone_id: milestone id + :param start_date: start date + :param end_date: end date + :param plan_id: plan id (if adding to plan) + :param config_ids: configuration ids + :param assigned_to_id: user id to assign + :param include_all: include all cases + :param refs: references + :param case_ids: specific case ids + :returns: Tuple with run id and error string. 
+ """ + add_run_data = self.data_provider.add_run( + run_name, + case_ids=case_ids, + start_date=start_date, + end_date=end_date, + milestone_id=milestone_id, + assigned_to_id=assigned_to_id, + include_all=include_all, + refs=refs, + ) + if not plan_id: + response = self.client.send_post(f"add_run/{project_id}", add_run_data) + run_id = response.response_text.get("id") + else: + if config_ids: + add_run_data["config_ids"] = config_ids + entry_data = { + "name": add_run_data["name"], + "suite_id": add_run_data["suite_id"], + "config_ids": config_ids, + "runs": [add_run_data], + } + else: + entry_data = add_run_data + response = self.client.send_post(f"add_plan_entry/{plan_id}", entry_data) + run_id = response.response_text["runs"][0]["id"] + return run_id, response.error_message + + def update_run( + self, + run_id: int, + run_name: str, + start_date: str = None, + end_date: str = None, + milestone_id: int = None, + refs: str = None, + refs_action: str = "add", + ) -> Tuple[dict, str]: + """ + Updates an existing run + + :param run_id: run id + :param run_name: run name + :param start_date: start date + :param end_date: end date + :param milestone_id: milestone id + :param refs: references to manage + :param refs_action: action to perform ('add', 'update', 'delete') + :returns: Tuple with run and error string. + """ + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.error_message: + return None, run_response.error_message + + existing_description = run_response.response_text.get("description", "") + existing_refs = run_response.response_text.get("refs", "") + + add_run_data = self.data_provider.add_run( + run_name, start_date=start_date, end_date=end_date, milestone_id=milestone_id + ) + add_run_data["description"] = existing_description # Retain the current description + + # Handle references based on action + if refs is not None: + updated_refs = self._manage_references(existing_refs, refs, refs_action) + add_run_data["refs"] = updated_refs + else: + add_run_data["refs"] = existing_refs # Keep existing refs if none provided + + existing_include_all = run_response.response_text.get("include_all", False) + add_run_data["include_all"] = existing_include_all + + if not existing_include_all: + # Only manage explicit case_ids when include_all=False + run_tests, error_message = self.__get_all_tests_in_run(run_id) + if error_message: + return None, f"Failed to get tests in run: {error_message}" + run_case_ids = [test["case_id"] for test in run_tests] + report_case_ids = add_run_data["case_ids"] + joint_case_ids = list(set(report_case_ids + run_case_ids)) + add_run_data["case_ids"] = joint_case_ids + else: + # include_all=True: TestRail includes all suite cases automatically + # Do NOT send case_ids array (TestRail ignores it anyway) + add_run_data.pop("case_ids", None) + + plan_id = run_response.response_text["plan_id"] + config_ids = run_response.response_text["config_ids"] + if not plan_id: + update_response = self.client.send_post(f"update_run/{run_id}", add_run_data) + elif plan_id and config_ids: + update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", add_run_data) + else: + response = self.client.send_get(f"get_plan/{plan_id}") + entry_id = next( + ( + run["entry_id"] + for entry in response.response_text["entries"] + for run in entry["runs"] + if run["id"] == run_id + ), + None, + ) + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", add_run_data) + run_response = self.client.send_get(f"get_run/{run_id}") + 
return run_response.response_text, update_response.error_message + + def _manage_references(self, existing_refs: str, new_refs: str, action: str) -> str: + """ + Manage references based on the specified action. + + :param existing_refs: current references in the run + :param new_refs: new references to process + :param action: 'add', 'update', or 'delete' + :returns: updated references string + """ + # Use shared utility function for reference management + return merge_references(existing_refs or "", new_refs, strategy=action) + + def append_run_references(self, run_id: int, references: List[str]) -> Tuple[Dict, List[str], List[str], str]: + """ + Append references to a test run, avoiding duplicates. + + :param run_id: ID of the test run + :param references: List of references to append + :returns: Tuple with (run_data, added_refs, skipped_refs, error_message) + """ + # Get current run data + run_response = self.client.send_get(f"get_run/{run_id}") + if run_response.error_message: + return None, [], [], run_response.error_message + + existing_refs = run_response.response_text.get("refs", "") or "" + + # Deduplicate input references using utility function + deduplicated_input = deduplicate_references(references) + + # Parse existing references and calculate changes + existing_list = parse_references(existing_refs) + added_refs = [ref for ref in deduplicated_input if ref not in existing_list] + skipped_refs = [ref for ref in deduplicated_input if ref in existing_list] + + # If no new references to add, return current state + if not added_refs: + return run_response.response_text, added_refs, skipped_refs, None + + # Combine references using utility function + combined_refs = merge_references(existing_refs, join_references(deduplicated_input), strategy="add") + + # Validate character limit + is_valid, error_msg = validate_references_length(combined_refs, self.MAX_RUN_REFERENCES_LENGTH) + if not is_valid: + return None, [], [], error_msg + + update_data = {"refs": combined_refs} + + # Determine the correct API endpoint based on plan membership + plan_id = run_response.response_text.get("plan_id") + config_ids = run_response.response_text.get("config_ids") + + if not plan_id: + # Standalone run + update_response = self.client.send_post(f"update_run/{run_id}", update_data) + elif plan_id and config_ids: + # Run in plan with configurations + update_response = self.client.send_post(f"update_run_in_plan_entry/{run_id}", update_data) + else: + # Run in plan without configurations - need to use plan entry endpoint + plan_response = self.client.send_get(f"get_plan/{plan_id}") + if plan_response.error_message: + return None, [], [], f"Failed to get plan details: {plan_response.error_message}" + + # Find the entry_id for this run + entry_id = None + for entry in plan_response.response_text.get("entries", []): + for run in entry.get("runs", []): + if run["id"] == run_id: + entry_id = entry["id"] + break + if entry_id: + break + + if not entry_id: + return None, [], [], f"Could not find plan entry for run {run_id}" + + update_response = self.client.send_post(f"update_plan_entry/{plan_id}/{entry_id}", update_data) + + if update_response.error_message: + return None, [], [], update_response.error_message + + updated_run_response = self.client.send_get(f"get_run/{run_id}") + return updated_run_response.response_text, added_refs, skipped_refs, updated_run_response.error_message + + def close_run(self, run_id: int) -> Tuple[dict, str]: + """ + Closes an existing test run and archives its tests & results. 
+ + :param run_id: run id + :returns: Tuple with dict created resources and error string. + """ + body = {"run_id": run_id} + response = self.client.send_post(f"close_run/{run_id}", body) + return response.response_text, response.error_message + + def delete_run(self, run_id: int) -> Tuple[dict, str]: + """ + Delete run given run id + + :param run_id: run id + :returns: Tuple with dict created resources and error string. + """ + response = self.client.send_post(f"delete_run/{run_id}", payload={}) + return response.response_text, response.error_message diff --git a/trcli/api/section_handler.py b/trcli/api/section_handler.py new file mode 100644 index 0000000..b47c5f1 --- /dev/null +++ b/trcli/api/section_handler.py @@ -0,0 +1,140 @@ +""" +SectionHandler - Handles all section-related operations for TestRail + +It manages all section operations including: +- Checking for missing sections +- Adding new sections +- Deleting sections +""" + +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class SectionHandler: + """Handles all section-related operations for TestRail""" + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_sections_callback, + ): + """ + Initialize the SectionHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for updating section data + :param get_all_sections_callback: Callback to fetch all sections from TestRail + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_sections = get_all_sections_callback + + def check_missing_section_ids(self, project_id: int, suite_id: int, suites_data: TestRailSuite) -> Tuple[bool, str]: + """ + Check what section id's are missing in DataProvider. + + :param project_id: project_id + :param suite_id: suite_id + :param suites_data: Test suite data from provider + :returns: Tuple with list missing section ID and error string. + """ + returned_sections, error_message = self.__get_all_sections(project_id, suite_id) + if not error_message: + missing_test_sections = False + sections_by_id = {section["id"]: section for section in returned_sections} + sections_by_name = {section["name"]: section for section in returned_sections} + section_data = [] + for section in suites_data.testsections: + if self.environment.section_id: + if section.section_id in sections_by_id.keys(): + section_json = sections_by_id[section.section_id] + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) + else: + missing_test_sections = True + if section.name in sections_by_name.keys(): + section_json = sections_by_name[section.name] + section_data.append( + { + "section_id": section_json["id"], + "suite_id": section_json["suite_id"], + "name": section_json["name"], + } + ) + else: + missing_test_sections = True + self.data_provider.update_data(section_data=section_data) + return missing_test_sections, error_message + else: + return False, error_message + + def add_sections(self, project_id: int, verify_callback) -> Tuple[List[Dict], str]: + """ + Add sections that doesn't have ID in DataProvider. 
+ Runs update_data in data_provider for successfully created resources. + + :param project_id: project_id + :param verify_callback: callback to verify returned data matches request + :returns: Tuple with list of dict created resources and error string. + """ + add_sections_data = self.data_provider.add_sections_data() + responses = [] + error_message = "" + for body in add_sections_data: + response = self.client.send_post(f"add_section/{project_id}", body) + if not response.error_message: + responses.append(response) + if not verify_callback(body, response.response_text): + responses.append(response) + error_message = FAULT_MAPPING["data_verification_error"] + break + else: + error_message = response.error_message + break + returned_resources = [ + { + "section_id": response.response_text["id"], + "suite_id": response.response_text["suite_id"], + "name": response.response_text["name"], + } + for response in responses + ] + ( + self.data_provider.update_data(section_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) + return returned_resources, error_message + + def delete_sections(self, added_sections: List[Dict]) -> Tuple[List, str]: + """ + Delete section given add_sections response + + :param added_sections: List of sections to delete + :returns: Tuple with dict created resources and error string. + """ + responses = [] + error_message = "" + for section in added_sections: + response = self.client.send_post(f"delete_section/{section['section_id']}", payload={}) + if not response.error_message: + responses.append(response.response_text) + else: + error_message = response.error_message + break + return responses, error_message diff --git a/trcli/api/suite_handler.py b/trcli/api/suite_handler.py new file mode 100644 index 0000000..40beaa5 --- /dev/null +++ b/trcli/api/suite_handler.py @@ -0,0 +1,163 @@ +""" +SuiteHandler - Handles all suite-related operations for TestRail + +It manages all suite operations including: +- Checking if suites exist +- Resolving suite IDs by name +- Getting suite IDs for projects +- Adding new suites +- Deleting suites +""" + +from beartype.typing import List, Tuple, Dict + +from trcli.api.api_client import APIClient +from trcli.cli import Environment +from trcli.constants import FAULT_MAPPING +from trcli.data_providers.api_data_provider import ApiDataProvider + + +class SuiteHandler: + """Handles all suite-related operations for TestRail""" + + def __init__( + self, + client: APIClient, + environment: Environment, + data_provider: ApiDataProvider, + get_all_suites_callback, + ): + """ + Initialize the SuiteHandler + + :param client: APIClient instance for making API calls + :param environment: Environment configuration + :param data_provider: Data provider for updating suite data + :param get_all_suites_callback: Callback to fetch all suites from TestRail + """ + self.client = client + self.environment = environment + self.data_provider = data_provider + self.__get_all_suites = get_all_suites_callback + + def check_suite_id(self, project_id: int, suite_id: int) -> Tuple[bool, str]: + """ + Check if suite exists using get_suites endpoint + + :param project_id: project id + :param suite_id: suite id to check + :returns: Tuple (exists, error_message) + """ + suites_data, error = self.__get_all_suites(project_id) + if not error: + available_suites = [suite for suite in suites_data if suite["id"] == suite_id] + return ( + (True, "") + if len(available_suites) > 0 + else (False, 
FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) + ) + else: + return None, error + + def resolve_suite_id_using_name(self, project_id: int, suite_name: str) -> Tuple[int, str]: + """ + Get suite ID matching suite name or returns -1 if unable to match any suite. + + :param project_id: project id + :param suite_name: suite name to match + :returns: tuple with id of the suite and error message + """ + suite_id = -1 + suites_data, error = self.__get_all_suites(project_id) + if not error: + for suite in suites_data: + if suite["name"] == suite_name: + suite_id = suite["id"] + self.data_provider.update_data([{"suite_id": suite["id"], "name": suite["name"]}]) + break + return ( + (suite_id, "") + if suite_id != -1 + else (-1, FAULT_MAPPING["missing_suite_by_name"].format(suite_name=suite_name)) + ) + else: + return -1, error + + def get_suite_ids(self, project_id: int) -> Tuple[List[int], str]: + """ + Get suite IDs for requested project_id. + + :param project_id: project id + :returns: tuple with list of suite ids and error string + """ + available_suites = [] + returned_resources = [] + suites_data, error = self.__get_all_suites(project_id) + if not error: + for suite in suites_data: + available_suites.append(suite["id"]) + returned_resources.append( + { + "suite_id": suite["id"], + "name": suite["name"], + } + ) + if returned_resources: + self.data_provider.update_data(suite_data=returned_resources) + else: + print("Update skipped") + return ( + (available_suites, "") + if len(available_suites) > 0 + else ([], FAULT_MAPPING["no_suites_found"].format(project_id=project_id)) + ) + else: + return [], error + + def add_suites(self, project_id: int, verify_callback) -> Tuple[List[Dict], str]: + """ + Adds suites that doesn't have ID's in DataProvider. + Runs update_data in data_provider for successfully created resources. + + :param project_id: project_id + :param verify_callback: callback to verify returned data matches request + :returns: Tuple with list of dict created resources and error string. + """ + add_suite_data = self.data_provider.add_suites_data() + responses = [] + error_message = "" + for body in add_suite_data: + response = self.client.send_post(f"add_suite/{project_id}", body) + if not response.error_message: + responses.append(response) + if not verify_callback(body, response.response_text): + responses.append(response) + error_message = FAULT_MAPPING["data_verification_error"] + break + else: + error_message = response.error_message + break + + returned_resources = [ + { + "suite_id": response.response_text["id"], + "name": response.response_text["name"], + } + for response in responses + ] + ( + self.data_provider.update_data(suite_data=returned_resources) + if len(returned_resources) > 0 + else "Update skipped" + ) + return returned_resources, error_message + + def delete_suite(self, suite_id: int) -> Tuple[dict, str]: + """ + Delete suite given suite id + + :param suite_id: suite id + :returns: Tuple with dict created resources and error string. 
+ """ + response = self.client.send_post(f"delete_suite/{suite_id}", payload={}) + return response.response_text, response.error_message diff --git a/trcli/cli.py b/trcli/cli.py index 155e1e6..6d3e8ba 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -77,6 +77,7 @@ def __init__(self, cmd="parse_junit"): self.assign_failed_to = None # Add proxy related attributes self.noproxy = None self.proxy_user = None + self.parallel_pagination = None @property def case_fields(self): @@ -90,7 +91,7 @@ def case_fields(self, case_fields: Union[List[str], dict]): exit(1) self._case_fields = fields_dict - @property + @property def result_fields(self): return self._result_fields @@ -143,6 +144,21 @@ def set_parameters(self, context: click.core.Context): param_sources_types = [ParameterSource.DEFAULT] else: param_sources_types = [ParameterSource.DEFAULT, ParameterSource.ENVIRONMENT] + + # First, get parameters from parent context (global options like --verbose) + if context.parent: + for param, value in context.parent.params.items(): + if param == "config": + continue + param_config_value = self.params_from_config.get(param, None) + param_source = context.parent.get_parameter_source(param) + + if param_source in param_sources_types and param_config_value is not None: + setattr(self, param, param_config_value) + else: + setattr(self, param, value) + + # Then, process current context parameters (subcommand-specific options) for param, value in context.params.items(): # Don't set config again # Skip setting config again @@ -202,18 +218,11 @@ def parse_params_from_config_file(self, file_path: Path): for page_content in file_content: if page_content: self.params_from_config.update(page_content) - if ( - self.params_from_config.get("config") is not None - and self.default_config_file - ): + if self.params_from_config.get("config") is not None and self.default_config_file: self.default_config_file = False - self.parse_params_from_config_file( - self.params_from_config["config"] - ) + self.parse_params_from_config_file(self.params_from_config["config"]) except (yaml.YAMLError, ValueError, TypeError) as e: - self.elog( - FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path) - ) + self.elog(FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path)) self.elog(f"Error details:\n{e}") if not self.default_config_file: exit(1) @@ -280,10 +289,13 @@ def main(self, *args, **kwargs): ) @click.option("-u", "--username", type=click.STRING, metavar="", help="Username.") @click.option("-p", "--password", type=click.STRING, metavar="", help="Password.") -@click.option("-k", "--key", metavar="", help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.") @click.option( - "-v", "--verbose", is_flag=True, help="Output all API calls and their results." + "-k", + "--key", + metavar="", + help="API key used for authenticating with TestRail. This must be used in conjunction with --username. 
If provided, --password is not required.", ) +@click.option("-v", "--verbose", is_flag=True, help="Output all API calls and their results.") @click.option("--verify", is_flag=True, help="Verify the data was added correctly.") @click.option("--insecure", is_flag=True, help="Allow insecure requests.") @click.option( @@ -328,22 +340,14 @@ def main(self, *args, **kwargs): help="Silence stdout", default=False, ) +@click.option("--proxy", metavar="", help="Proxy address and port (e.g., http://proxy.example.com:8080).") +@click.option("--proxy-user", metavar="", help="Proxy username and password in the format 'username:password'.") @click.option( - "--proxy", - metavar="", - help="Proxy address and port (e.g., http://proxy.example.com:8080)." -) -@click.option( - "--proxy-user", - metavar="", - help="Proxy username and password in the format 'username:password'." + "--noproxy", metavar="", help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." ) @click.option( - "--noproxy", - metavar="", - help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." + "--parallel-pagination", is_flag=True, help="Enable parallel pagination for faster case fetching (experimental)." ) - def cli(environment: Environment, context: click.core.Context, *args, **kwargs): """TestRail CLI""" if not sys.argv[1:]: @@ -354,6 +358,6 @@ def cli(environment: Environment, context: click.core.Context, *args, **kwargs): if not context.invoked_subcommand: print(MISSING_COMMAND_SLOGAN) exit(2) - + environment.parse_config_file(context) environment.set_parameters(context) diff --git a/trcli/commands/cmd_export_gherkin.py b/trcli/commands/cmd_export_gherkin.py new file mode 100644 index 0000000..cc4941f --- /dev/null +++ b/trcli/commands/cmd_export_gherkin.py @@ -0,0 +1,132 @@ +import click +from pathlib import Path + +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.constants import FAULT_MAPPING +from trcli.api.api_client import APIClient +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.data_classes.dataclass_testrail import TestRailSuite +import trcli + + +@click.command(context_settings=CONTEXT_SETTINGS) +@click.option( + "--case-id", + type=click.IntRange(min=1), + metavar="", + required=True, + help="TestRail test case ID to export as .feature file.", +) +@click.option( + "--output", + type=click.Path(), + metavar="", + help="Output path for the .feature file. If not specified, prints to stdout.", +) +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, case_id: int, output: str, **kwargs): + """Export BDD test case from TestRail as .feature file + + This command retrieves a test case from TestRail's BDD endpoint + and exports it as a Gherkin .feature file. + + The test case must have been created via the BDD import functionality + for this command to work. 
+ + Mapping Rules (TestRail to .feature): + - Test Case name → Feature: + - Preconditions field → Free text after Feature: + - BDD Scenario field → Background:/Scenario:/Scenario Outline:/Rule: + - Reference field → @Tags before Feature: (@ added) + - BDD field tags → @Tags before scenarios + + Examples: + # Export to file + trcli export_gherkin --case-id 456 --output login.feature --project-id 1 + + # Print to stdout + trcli export_gherkin --case-id 456 --project-id 1 + """ + environment.cmd = "export_gherkin" + environment.set_parameters(context) + environment.check_for_required_parameters() + + try: + environment.vlog(f"Target case ID: {case_id}") + environment.vlog(f"API endpoint: GET /api/v2/get_bdd/{case_id}") + + # Initialize API client + environment.log("Connecting to TestRail...") + + # Create APIClient + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) + + # Set credentials after initialization + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler (BDD operations don't need suite data) + minimal_suite = TestRailSuite(name="BDD Export", testsections=[]) + + # Create ApiRequestHandler + api_request_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=minimal_suite, + ) + + # Get BDD test case + environment.log(f"Retrieving BDD test case {case_id}...") + feature_content, error_message = api_request_handler.get_bdd(case_id) + + if error_message: + environment.elog(f"Error retrieving test case: {error_message}") + exit(1) + + if not feature_content or not feature_content.strip(): + environment.elog(f"Error: No BDD content found for case ID {case_id}") + environment.elog("This test case may not have been created via BDD import.") + exit(1) + + # Output results + if output: + output_path = Path(output) + + if environment.verbose: + environment.log(f"Writing feature file to: {output_path}") + + # Create parent directory if it doesn't exist + output_path.parent.mkdir(parents=True, exist_ok=True) + + with open(output_path, "w", encoding="utf-8") as f: + f.write(feature_content) + + environment.log(f"\n✓ Successfully exported test case {case_id}") + environment.log(f" File: {output_path}") + environment.log(f" Size: {len(feature_content)} characters") + else: + # Print to stdout + print(feature_content) + + except PermissionError: + environment.elog(f"Error: Permission denied writing to file: {output}") + exit(1) + except IOError as e: + environment.elog(f"Error writing file: {str(e)}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/commands/cmd_import_gherkin.py b/trcli/commands/cmd_import_gherkin.py new file mode 100644 index 0000000..4d21ebf --- /dev/null +++ b/trcli/commands/cmd_import_gherkin.py @@ -0,0 +1,138 @@ +import click +from pathlib import Path + +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.constants import FAULT_MAPPING +from trcli.api.api_client import APIClient +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.data_classes.dataclass_testrail import 
TestRailSuite +import trcli + + +@click.command(context_settings=CONTEXT_SETTINGS) +@click.option( + "-f", + "--file", + type=click.Path(exists=True), + metavar="", + required=True, + help="Path to Gherkin .feature file to upload.", +) +@click.option( + "--section-id", + type=click.IntRange(min=1), + metavar="", + required=True, + help="TestRail section ID where test cases will be created.", +) +@click.option("--json-output", is_flag=True, help="Output case IDs in JSON format.") +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, file: str, section_id: int, **kwargs): + """Upload Gherkin .feature file to TestRail + + This command uploads a Gherkin/BDD .feature file directly to TestRail, + which will create or update test cases based on the scenarios in the file. + + TestRail will parse the .feature file and automatically create test cases + for each scenario, maintaining the BDD structure in TestRail's native format. + + Mapping Rules (.feature to TestRail): + - Feature: → Test Case name + - Free text after Feature: → Preconditions field + - Background:/Scenario:/Scenario Outline:/Rule: → BDD Scenario field + - Examples: (under Scenario Outline/Rule) → Same BDD field as parent + - @Tags before Feature: → Reference field (@ stripped) + - @Tags before scenarios → BDD field + + Example: + trcli import_gherkin -f login.feature --section-id 123 --project-id 1 + """ + environment.cmd = "import_gherkin" + environment.set_parameters(context) + environment.check_for_required_parameters() + + try: + # Read the feature file + feature_path = Path(file) + if environment.verbose: + environment.log(f"Reading feature file: {feature_path}") + + with open(feature_path, "r", encoding="utf-8") as f: + feature_content = f.read() + + if not feature_content.strip(): + environment.elog("Error: Feature file is empty") + exit(1) + + environment.vlog(f"Feature file size: {len(feature_content)} characters") + environment.vlog(f"Target section ID: {section_id}") + environment.vlog(f"API endpoint: POST /api/v2/add_bdd/{section_id}") + + # Initialize API client + environment.log("Connecting to TestRail...") + + # Create APIClient + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) + + # Set credentials after initialization + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler (BDD operations don't need suite data) + minimal_suite = TestRailSuite(name="BDD Import", testsections=[]) + + # Create ApiRequestHandler + api_request_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=minimal_suite, + ) + + # Upload feature file + environment.log(f"Uploading feature file to TestRail...") + case_ids, error_message = api_request_handler.add_bdd(section_id, feature_content) + + if error_message: + environment.elog(f"Error uploading feature file: {error_message}") + exit(1) + + if not case_ids: + environment.log("Warning: No case IDs returned from TestRail") + environment.log("Feature file was uploaded but no cases were created.") + exit(0) + + # Display results + if kwargs.get("json_output"): + import json + + print(json.dumps({"case_ids": case_ids, "count": len(case_ids)}, 
indent=2)) + else: + environment.log(f"\nSuccessfully uploaded feature file!") + environment.log(f" Created/updated {len(case_ids)} test case(s)") + environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") + + except FileNotFoundError: + environment.elog(f"Error: Feature file not found: {file}") + exit(1) + except PermissionError: + environment.elog(f"Error: Permission denied reading feature file: {file}") + exit(1) + except UnicodeDecodeError: + environment.elog(f"Error: Feature file must be UTF-8 encoded: {file}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py new file mode 100644 index 0000000..eaeddab --- /dev/null +++ b/trcli/commands/cmd_parse_cucumber.py @@ -0,0 +1,270 @@ +import click +import json + +from trcli.api.results_uploader import ResultsUploader +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.commands.results_parser_helpers import results_parser_options, print_config +from trcli.constants import FAULT_MAPPING, ProjectErrors +from trcli.data_classes.validation_exception import ValidationException +from trcli.readers.cucumber_json import CucumberParser + + +@click.command(context_settings=CONTEXT_SETTINGS) +@results_parser_options +@click.option( + "--upload-feature", + is_flag=True, + help="Generate and upload .feature file to create/update test cases via BDD endpoint.", +) +@click.option( + "--feature-section-id", + type=click.IntRange(min=1), + metavar="", + help="Section ID for uploading .feature file (required if --upload-feature is used).", +) +@click.option( + "-v", + "--verbose", + is_flag=True, + help="Enable verbose logging output.", +) +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, *args, **kwargs): + """Parse Cucumber JSON results and upload to TestRail + + This command parses Cucumber JSON test results and uploads them to TestRail. 
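+
+    Example (illustrative; option values are placeholders):
+        trcli parse_cucumber -f cucumber.json --title "Cucumber results" --project-id 1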
+ """ + environment.cmd = "parse_cucumber" + environment.set_parameters(context) + environment.check_for_required_parameters() + + # Set verbose mode if requested + if kwargs.get("verbose"): + environment.verbose = True + + # Validate feature upload options + upload_feature = kwargs.get("upload_feature", False) + feature_section_id = kwargs.get("feature_section_id") + + if upload_feature and not feature_section_id: + environment.elog("Error: --feature-section-id is required when using --upload-feature") + exit(1) + + print_config(environment) + + try: + # Parse Cucumber JSON file + parsed_suites = CucumberParser(environment).parse_file() + + # Workflow: Upload feature file if requested + # Only create test cases if auto-creation is enabled + if upload_feature and environment.auto_creation_response: + environment.log("\n=== Phase 1: Creating BDD Test Cases ===") + + # Setup API client + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.api_client import APIClient + import trcli + + environment.vlog("Initializing API client for BDD upload...") + uploader_metadata = APIClient.build_uploader_metadata(version=trcli.__version__) + api_client = APIClient( + host_name=environment.host, + verify=not environment.insecure, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + uploader_metadata=uploader_metadata, + ) + + # Set credentials + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + + # Create minimal suite for ApiRequestHandler + minimal_suite = parsed_suites[0] if parsed_suites else None + if not minimal_suite: + from trcli.data_classes.dataclass_testrail import TestRailSuite + + minimal_suite = TestRailSuite(name="Cucumber BDD", testsections=[]) + + # Set suite_id from environment if provided + if environment.suite_id: + minimal_suite.suite_id = environment.suite_id + + # Create ApiRequestHandler + api_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=minimal_suite, + ) + + # Resolve project to get actual project_id + environment.log("Checking project. 
", new_line=False) + project_data = api_handler.get_project_data(environment.project, environment.project_id) + + # Validate project was found + if project_data.project_id == ProjectErrors.not_existing_project: + environment.elog(f"\n{project_data.error_message}") + exit(1) + elif project_data.project_id == ProjectErrors.other_error: + environment.elog(f"\nError checking project: {project_data.error_message}") + exit(1) + elif project_data.project_id == ProjectErrors.multiple_project_same_name: + environment.elog(f"\nError checking project: {project_data.error_message}") + exit(1) + + environment.log("Done.") + resolved_project_id = project_data.project_id + + # Get BDD template ID + environment.log("Getting BDD template ID...") + bdd_template_id, error_message = api_handler.get_bdd_template_id(resolved_project_id) + + if error_message: + environment.elog(f"Error getting BDD template: {error_message}") + exit(1) + + environment.vlog(f"Using BDD template ID: {bdd_template_id}") + + # Load Cucumber JSON to access raw feature data + parser = CucumberParser(environment) + with open(environment.file, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + if not isinstance(cucumber_data, list) or not cucumber_data: + environment.elog("Error: Invalid Cucumber JSON format") + exit(1) + + # Create BDD test cases (one per feature) + environment.log("Creating BDD test cases from features...") + case_ids = [] + feature_scenario_counts = [] # Track how many scenarios per feature + + for feature in cucumber_data: + feature_name = feature.get("name", "Untitled Feature") + + # Count scenarios in this feature (excluding backgrounds) + scenario_count = sum( + 1 + for element in feature.get("elements", []) + if element.get("type", "") in ("scenario", "scenario_outline") + ) + + if scenario_count == 0: + environment.vlog(f"Skipping feature '{feature_name}' - no scenarios found") + continue + + # Generate complete .feature file content for this feature + environment.vlog(f"Generating .feature file for feature: {feature_name}") + feature_content = parser._generate_feature_content(feature) + + # Upload .feature file via add_bdd endpoint + environment.vlog(f"Uploading feature '{feature_name}' with {scenario_count} scenario(s)") + returned_case_ids, error_message = api_handler.add_bdd( + section_id=feature_section_id, feature_content=feature_content + ) + + if error_message: + environment.elog(f"Error creating BDD test case for feature '{feature_name}': {error_message}") + exit(1) + + if not returned_case_ids or len(returned_case_ids) == 0: + environment.elog(f"Error: add_bdd did not return a case ID for feature '{feature_name}'") + exit(1) + + case_id = returned_case_ids[0] # add_bdd returns list with one case ID + case_ids.append(case_id) + feature_scenario_counts.append(scenario_count) + environment.vlog(f" Created case ID: {case_id} (covers {scenario_count} scenario(s))") + + # Set automation_id on the created test case for future matching + # Use feature name as automation_id (one TestRail case = one feature) + automation_id = feature_name + success, error_message = api_handler.update_case_automation_id(case_id, automation_id) + + if not success: + environment.log(f" Warning: Failed to set automation_id: {error_message}") + else: + environment.vlog(f" Set automation_id: '{automation_id}'") + + environment.log(f"✓ Successfully created {len(case_ids)} BDD test case(s)") + environment.log(f" Case IDs: {', '.join(map(str, case_ids))}") + + # Map returned case IDs to parsed test cases + 
environment.vlog("\nMapping case IDs to test results...") + + # Map case IDs to sections (one case ID per feature/section) + # Each feature creates one test case in TestRail but may have multiple scenario results + total_mapped = 0 + if len(case_ids) != len(parsed_suites[0].testsections): + environment.elog( + f"Error: Mismatch between features ({len(case_ids)}) and parsed sections ({len(parsed_suites[0].testsections)})" + ) + exit(1) + + for section, case_id, scenario_count in zip( + parsed_suites[0].testsections, case_ids, feature_scenario_counts + ): + environment.vlog( + f"Mapping case ID {case_id} to section '{section.name}' ({len(section.testcases)} scenario(s))" + ) + + # Assign the same case ID to ALL test cases (scenarios) in this section + for test_case in section.testcases: + test_case.case_id = case_id + if test_case.result: + test_case.result.case_id = case_id + total_mapped += 1 + + environment.vlog(f"Mapped {len(case_ids)} case ID(s) to {total_mapped} test result(s)") + + environment.log("\nProceeding to upload test results...") + elif upload_feature and not environment.auto_creation_response: + # Auto-creation is disabled, skip test case creation + environment.log("\n=== Skipping BDD Test Case Creation ===") + environment.log("Auto-creation disabled (-n flag). Will match scenarios using automation_id.") + + # Upload test results + environment.log("\n=== Phase 2: Uploading Test Results ===") + + # Ensure all suites have suite_id set from environment + for suite in parsed_suites: + if environment.suite_id and not suite.suite_id: + suite.suite_id = environment.suite_id + + run_id = None + for suite in parsed_suites: + result_uploader = ResultsUploader(environment=environment, suite=suite) + result_uploader.upload_results() + + if run_id is None and hasattr(result_uploader, "last_run_id"): + run_id = result_uploader.last_run_id + + # Summary + if run_id: + environment.log(f"\n✓ Results uploaded successfully to run ID: {run_id}") + else: + environment.log("\n✓ Results processing completed") + + except FileNotFoundError: + environment.elog(f"Error: Cucumber JSON file not found: {environment.file}") + exit(1) + except json.JSONDecodeError as e: + environment.elog(f"Error: Invalid JSON format in file: {environment.file}") + environment.elog(f" {str(e)}") + exit(1) + except ValidationException as e: + environment.elog(f"Validation error: {str(e)}") + exit(1) + except ValueError as e: + environment.elog(f"Error parsing Cucumber JSON: {str(e)}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error: {str(e)}") + if environment.verbose: + import traceback + + environment.elog(traceback.format_exc()) + exit(1) diff --git a/trcli/commands/cmd_parse_gherkin.py b/trcli/commands/cmd_parse_gherkin.py new file mode 100644 index 0000000..c9af7f0 --- /dev/null +++ b/trcli/commands/cmd_parse_gherkin.py @@ -0,0 +1,141 @@ +import json +import click + +from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.constants import FAULT_MAPPING +from trcli.readers.gherkin_parser import GherkinParser +from serde import to_dict + + +@click.command(context_settings=CONTEXT_SETTINGS) +@click.option( + "-f", + "--file", + type=click.Path(exists=True), + metavar="", + required=True, + help="Path to Gherkin .feature file to parse.", +) +@click.option("--output", type=click.Path(), metavar="", help="Optional output file path to save parsed JSON.") +@click.option("--pretty", is_flag=True, help="Pretty print JSON output with indentation.") +@click.pass_context 
+@pass_environment +def cli(environment: Environment, context: click.Context, file: str, output: str, pretty: bool): + """Parse Gherkin .feature file locally + + This command parses Gherkin/BDD .feature files and converts them into + TestRail data structure format without uploading to TestRail. + + """ + environment.cmd = "parse_gherkin" + environment.file = file + + try: + # Parse the feature file + parser = GherkinParser(environment) + parsed_suites = parser.parse_file() + + # Convert to dictionary format (manual serialization to include skipped fields) + suites_data = [] + for suite in parsed_suites: + # Manually serialize the suite to include testsections + sections_data = [] + for section in suite.testsections: + # Manually serialize test cases + cases_data = [] + for case in section.testcases: + case_dict = { + "title": case.title, + "case_id": case.case_id, + "custom_automation_id": case.custom_automation_id, + "case_fields": case.case_fields, + } + # Include result if present + if case.result: + result_data = { + "status_id": case.result.status_id, + "comment": case.result.comment, + "elapsed": case.result.elapsed, + } + # Include steps + if case.result.custom_step_results: + steps_data = [] + for step in case.result.custom_step_results: + steps_data.append( + { + "content": step.content, + "status_id": step.status_id if hasattr(step, "status_id") else None, + } + ) + result_data["custom_step_results"] = steps_data + case_dict["result"] = result_data + cases_data.append(case_dict) + + # Serialize properties + properties_data = [] + if section.properties: + for prop in section.properties: + properties_data.append( + { + "name": prop.name, + "value": prop.value, + } + ) + + section_dict = { + "name": section.name, + "testcases": cases_data, + "properties": properties_data, + } + sections_data.append(section_dict) + + suite_dict = { + "name": suite.name, + "source": suite.source, + "testsections": sections_data, + } + suites_data.append(suite_dict) + + # Prepare JSON output + output_data = { + "suites": suites_data, + "summary": { + "total_suites": len(suites_data), + "total_sections": sum(len(suite.get("testsections", [])) for suite in suites_data), + "total_cases": sum( + len(section.get("testcases", [])) + for suite in suites_data + for section in suite.get("testsections", []) + ), + "source_file": file, + }, + } + + # Format JSON + if pretty: + json_output = json.dumps(output_data, indent=2, ensure_ascii=False) + else: + json_output = json.dumps(output_data, ensure_ascii=False) + + # Output results + if output: + # Save to file + with open(output, "w", encoding="utf-8") as f: + f.write(json_output) + environment.log(f"✓ Parsed results saved to: {output}") + environment.log(f" Total suites: {output_data['summary']['total_suites']}") + environment.log(f" Total sections: {output_data['summary']['total_sections']}") + environment.log(f" Total test cases: {output_data['summary']['total_cases']}") + else: + # Print to stdout + print(json_output) + + except FileNotFoundError: + environment.elog(FAULT_MAPPING["missing_file"]) + exit(1) + except ValueError as e: + environment.elog(f"Error parsing Gherkin file: {str(e)}") + exit(1) + except Exception as e: + environment.elog(f"Unexpected error during parsing: {str(e)}") + exit(1) diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index e24fcea..744ec1f 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -18,38 +18,35 @@ "--special-parser", metavar="", default="junit", - 
type=click.Choice(["junit", "saucectl"], case_sensitive=False), - help="Optional special parser option for specialized JUnit reports." + type=click.Choice(["junit", "saucectl", "bdd"], case_sensitive=False), + help="Optional special parser option for specialized JUnit reports. Use 'bdd' for BDD framework JUnit output.", ) @click.option( - "-a", "--assign", + "-a", + "--assign", "assign_failed_to", metavar="", - help="Comma-separated list of user emails to assign failed test results to." + help="Comma-separated list of user emails to assign failed test results to.", ) @click.option( "--test-run-ref", metavar="", - help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total)." -) -@click.option( - "--json-output", - is_flag=True, - help="Output reference operation results in JSON format." + help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total).", ) +@click.option("--json-output", is_flag=True, help="Output reference operation results in JSON format.") @click.option( "--update-existing-cases", type=click.Choice(["yes", "no"], case_sensitive=False), default="no", metavar="", - help="Update existing TestRail cases with values from JUnit properties (default: no)." + help="Update existing TestRail cases with values from JUnit properties (default: no).", ) @click.option( "--update-strategy", type=click.Choice(["append", "replace"], case_sensitive=False), default="append", metavar="", - help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append)." + help="Strategy for combining incoming values with existing case field values, whether to append or replace (default: append).", ) @click.pass_context @pass_environment @@ -58,13 +55,13 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.cmd = "parse_junit" environment.set_parameters(context) environment.check_for_required_parameters() - + if environment.test_run_ref is not None: validation_error = _validate_test_run_ref(environment.test_run_ref) if validation_error: environment.elog(validation_error) exit(1) - + settings.ALLOW_ELAPSED_MS = environment.allow_ms print_config(environment) try: @@ -75,20 +72,20 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): result_uploader = ResultsUploader(environment=environment, suite=suite) result_uploader.upload_results() - if run_id is None and hasattr(result_uploader, 'last_run_id'): + if run_id is None and hasattr(result_uploader, "last_run_id"): run_id = result_uploader.last_run_id - + # Collect case update results - if hasattr(result_uploader, 'case_update_results'): + if hasattr(result_uploader, "case_update_results"): case_update_results = result_uploader.case_update_results - + if environment.test_run_ref and run_id: _handle_test_run_references(environment, run_id) - + # Handle case update reporting if enabled if environment.update_existing_cases == "yes" and case_update_results is not None: _handle_case_update_reporting(environment, case_update_results) - + # Exit with error if there were case update failures (after reporting) if case_update_results.get("failed_cases"): exit(1) @@ -116,14 +113,14 @@ def _validate_test_run_ref(test_run_ref: str) -> str: """ if not test_run_ref or not test_run_ref.strip(): return "Error: --test-run-ref cannot be empty or whitespace-only" - - refs = [ref.strip() for ref in test_run_ref.split(',') if ref.strip()] + + refs = [ref.strip() for ref in 
test_run_ref.split(",") if ref.strip()] if not refs: return "Error: --test-run-ref contains no valid references (malformed input)" - + if len(test_run_ref) > 250: return f"Error: --test-run-ref exceeds 250 character limit ({len(test_run_ref)} characters)" - + return None @@ -135,40 +132,34 @@ def _handle_test_run_references(environment: Environment, run_id: int): from trcli.data_classes.dataclass_testrail import TestRailSuite import json - refs = [ref.strip() for ref in environment.test_run_ref.split(',') if ref.strip()] - - project_client = ProjectBasedClient( - environment=environment, - suite=TestRailSuite(name="temp", suite_id=1) - ) + refs = [ref.strip() for ref in environment.test_run_ref.split(",") if ref.strip()] + + project_client = ProjectBasedClient(environment=environment, suite=TestRailSuite(name="temp", suite_id=1)) project_client.resolve_project() - + environment.log(f"Appending references to test run {run_id}...") run_data, added_refs, skipped_refs, error_message = project_client.api_request_handler.append_run_references( run_id, refs ) - + if error_message: environment.elog(f"Error: Failed to append references: {error_message}") exit(1) - + final_refs = run_data.get("refs", "") if run_data else "" - + if environment.json_output: # JSON output - result = { - "run_id": run_id, - "added": added_refs, - "skipped": skipped_refs, - "total_references": final_refs - } + result = {"run_id": run_id, "added": added_refs, "skipped": skipped_refs, "total_references": final_refs} print(json.dumps(result, indent=2)) else: environment.log(f"References appended successfully:") environment.log(f" Run ID: {run_id}") environment.log(f" Total references: {len(final_refs.split(',')) if final_refs else 0}") environment.log(f" Newly added: {len(added_refs)} ({', '.join(added_refs) if added_refs else 'none'})") - environment.log(f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})") + environment.log( + f" Skipped (duplicates): {len(skipped_refs)} ({', '.join(skipped_refs) if skipped_refs else 'none'})" + ) if final_refs: environment.log(f" All references: {final_refs}") @@ -178,24 +169,24 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: Handle reporting of case update results. 
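+    Emits a JSON summary when --json-output is set; otherwise logs a human-readable
+    summary of updated, skipped, and failed case reference updates.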
""" import json - + # Handle None input gracefully if case_update_results is None: return - + if environment.json_output: # JSON output for case updates result = { "summary": { "updated_cases": len(case_update_results.get("updated_cases", [])), "skipped_cases": len(case_update_results.get("skipped_cases", [])), - "failed_cases": len(case_update_results.get("failed_cases", [])) + "failed_cases": len(case_update_results.get("failed_cases", [])), }, "details": { "updated_cases": case_update_results.get("updated_cases", []), "skipped_cases": case_update_results.get("skipped_cases", []), - "failed_cases": case_update_results.get("failed_cases", []) - } + "failed_cases": case_update_results.get("failed_cases", []), + }, } print(json.dumps(result, indent=2)) else: @@ -203,13 +194,13 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: updated_cases = case_update_results.get("updated_cases", []) skipped_cases = case_update_results.get("skipped_cases", []) failed_cases = case_update_results.get("failed_cases", []) - + if updated_cases or skipped_cases or failed_cases: environment.log("Case Reference Updates Summary:") environment.log(f" Updated cases: {len(updated_cases)}") environment.log(f" Skipped cases: {len(skipped_cases)}") environment.log(f" Failed cases: {len(failed_cases)}") - + if updated_cases: environment.log(" Updated case details:") for case_info in updated_cases: @@ -217,14 +208,14 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: added = case_info.get("added_refs", []) skipped = case_info.get("skipped_refs", []) environment.log(f" C{case_id}: added {len(added)} refs, skipped {len(skipped)} duplicates") - + if skipped_cases: environment.log(" Skipped case details:") for case_info in skipped_cases: case_id = case_info["case_id"] reason = case_info.get("reason", "Unknown reason") environment.log(f" C{case_id}: {reason}") - + if failed_cases: environment.log(" Failed case details:") for case_info in failed_cases: diff --git a/trcli/constants.py b/trcli/constants.py index 0dc9fec..08373a7 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -5,9 +5,7 @@ missing_file="Please provide a valid path to your results file with the -f argument.", ) -PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict( - missing_title="Please give your Test Run a title using the --title argument." -) +PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING = dict(missing_title="Please give your Test Run a title using the --title argument.") ADD_RUN_FAULT_MAPPING = dict( missing_title="Please give your Test Run a title using the --title argument.", @@ -58,23 +56,29 @@ f" - System Name: automation_id\n" f" - Type: Text (or String)\n" f" - Is Active: True", - proxy_connection_error= "Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.", - proxy_authentication_failed= "Proxy authentication failed for proxy. Please verify the username and password.", - proxy_timeout= "The connection to the proxy server timed out. Please try again later or check the proxy server's availability.", - proxy_bypass_error= "Failed to bypass the proxy for host. Please check the settings.", - proxy_invalid_configuration= "The provided proxy configuration is invalid. Please check the proxy URL and format.", - ssl_error_on_proxy= "SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.", - no_proxy_match_error= "The host {host} does not match any NO_PROXY rules. 
Ensure the correct domains or IP addresses are specified for bypassing the proxy.", - no_suites_found= "The project {project_id} does not have any suites.", - invalid_json_response= "Received invalid response from TestRail server (HTTP {status_code}). " + proxy_connection_error="Failed to connect to the proxy server. Please check the proxy settings and ensure the server is available.", + proxy_authentication_failed="Proxy authentication failed for proxy. Please verify the username and password.", + proxy_timeout="The connection to the proxy server timed out. Please try again later or check the proxy server's availability.", + proxy_bypass_error="Failed to bypass the proxy for host. Please check the settings.", + proxy_invalid_configuration="The provided proxy configuration is invalid. Please check the proxy URL and format.", + ssl_error_on_proxy="SSL error encountered while using the HTTPS proxy. Please check the proxy's SSL certificate.", + no_proxy_match_error="The host {host} does not match any NO_PROXY rules. Ensure the correct domains or IP addresses are specified for bypassing the proxy.", + no_suites_found="The project {project_id} does not have any suites.", + invalid_json_response="Received invalid response from TestRail server (HTTP {status_code}). " "Please verify your TestRail host URL (-h) is correct and points to a valid TestRail instance. " "Response preview: {response_preview}", - invalid_api_response= "Invalid response from TestRail API: {error_details}" + invalid_api_response="Invalid response from TestRail API: {error_details}", + attachment_upload_failed="Failed to upload attachment '{file_path}' for case {case_id}: {error_message}", + attachment_too_large="Failed to upload attachment '{file_name}' for case {case_id}: " + "File size exceeds TestRail's maximum limit of 250 MB. Please reduce the file size or exclude this attachment.", ) COMMAND_FAULT_MAPPING = dict( add_run=dict(**FAULT_MAPPING, **ADD_RUN_FAULT_MAPPING), parse_junit=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), + import_gherkin=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), + export_gherkin=dict(**FAULT_MAPPING), + parse_cucumber=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), parse_openapi=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING), parse_robot=dict(**FAULT_MAPPING, **PARSE_COMMON_FAULT_MAPPING, **PARSE_JUNIT_OR_ROBOT_FAULT_MAPPING), labels=dict(**FAULT_MAPPING), @@ -97,10 +101,14 @@ Copyright 2025 Gurock Software GmbH - www.gurock.com""" TOOL_USAGE = f"""Supported and loaded modules: - parse_junit: JUnit XML Files (& Similar) + - parse_cucumber: Cucumber JSON results (BDD) + - import_gherkin: Upload .feature files to TestRail BDD + - export_gherkin: Export BDD test cases as .feature files - parse_robot: Robot Framework XML Files - parse_openapi: OpenAPI YML Files - add_run: Create a new test run - - labels: Manage labels (projects, cases, and tests)""" + - labels: Manage labels (add, update, delete, list) + - references: Manage references (cases and runs)""" MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. 
\nError: Missing command.""" @@ -128,6 +136,7 @@ class RevertMessages: run_deleted = "Deleted created run" run_not_deleted = "Unable to delete created run: {error}" + OLD_SYSTEM_NAME_AUTOMATION_ID = "custom_automation_id" # field name mismatch on testrail side (can not reproduce in cloud version TestRail v9.1.2) -UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id" \ No newline at end of file +UPDATED_SYSTEM_NAME_AUTOMATION_ID = "custom_case_automation_id" diff --git a/trcli/readers/cucumber_json.py b/trcli/readers/cucumber_json.py new file mode 100644 index 0000000..4a1ffd3 --- /dev/null +++ b/trcli/readers/cucumber_json.py @@ -0,0 +1,587 @@ +import json +from pathlib import Path +from beartype.typing import List, Dict, Any, Optional, Tuple + +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer +from trcli.data_classes.dataclass_testrail import ( + TestRailCase, + TestRailSuite, + TestRailSection, + TestRailResult, + TestRailSeparatedStep, +) +from trcli.readers.file_parser import FileParser + + +class CucumberParser(FileParser): + """Parser for Cucumber JSON results format""" + + def __init__(self, environment: Environment): + super().__init__(environment) + self.case_matcher = environment.case_matcher + + def parse_file(self) -> List[TestRailSuite]: + """Parse Cucumber JSON results file and convert to TestRailSuite structure + + Returns: + List of TestRailSuite objects with test cases and results + """ + self.env.log(f"Parsing Cucumber JSON file: {self.filename}") + + # Read and parse the JSON file + with open(self.filepath, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + # Cucumber JSON is typically an array of features + if not isinstance(cucumber_data, list): + raise ValueError("Cucumber JSON must be an array of features") + + # Parse features into TestRail structure + sections = [] + for feature in cucumber_data: + feature_sections = self._parse_feature(feature) + sections.extend(feature_sections) + + cases_count = sum(len(section.testcases) for section in sections) + self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") + + # Create suite + suite_name = self.env.suite_name if self.env.suite_name else "Cucumber Test Results" + testrail_suite = TestRailSuite( + name=suite_name, + testsections=sections, + source=self.filename, + ) + + return [testrail_suite] + + def _parse_feature(self, feature: Dict[str, Any]) -> List[TestRailSection]: + """Parse a single Cucumber feature into TestRail sections + + Args: + feature: Feature object from Cucumber JSON + + Returns: + List of TestRailSection objects + """ + feature_name = feature.get("name", "Untitled Feature") + feature_tags = self._extract_tags(feature.get("tags", [])) + + # Create a section for this feature + section = TestRailSection(name=feature_name, testcases=[]) + + # Parse scenarios/scenario outlines + for element in feature.get("elements", []): + element_type = element.get("type", "") + + if element_type in ("scenario", "scenario_outline"): + test_case = self._parse_scenario(element, feature_name, feature_tags) + if test_case: + section.testcases.append(test_case) + + return [section] if section.testcases else [] + + def _parse_scenario( + self, scenario: Dict[str, Any], feature_name: str, feature_tags: List[str] + ) -> Optional[TestRailCase]: + """Parse a Cucumber scenario into TestRailCase + + Args: + scenario: Scenario object from Cucumber JSON + feature_name: Name of the parent feature + feature_tags: 
Tags from the parent feature + + Returns: + TestRailCase object or None + """ + scenario_name = scenario.get("name", "Untitled Scenario") + scenario_tags = self._extract_tags(scenario.get("tags", [])) + all_tags = feature_tags + scenario_tags + + # Build automation ID + automation_id = self._build_automation_id(feature_name, all_tags, scenario_name) + + # Extract case ID if using matcher + case_id = None + if self.case_matcher == MatchersParser.NAME: + case_id, scenario_name = MatchersParser.parse_name_with_id(scenario_name) + elif self.case_matcher == MatchersParser.PROPERTY: + # Look for @C tag pattern + for tag in all_tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + case_id = int(tag[2:]) + break + except ValueError: + pass + + # Parse steps and determine overall status + steps = scenario.get("steps", []) + step_results, overall_status = self._parse_steps(steps) + + # Calculate elapsed time + elapsed_time = self._calculate_elapsed_time(steps) + + # Build comment from failures + comment = self._build_comment_from_failures(steps) + + # Create result object + result = TestRailResult( + case_id=case_id, + status_id=overall_status, + comment=comment, + elapsed=elapsed_time, + custom_step_results=step_results, + ) + + # Create test case + test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=case_id, + result=result, + custom_automation_id=automation_id, + case_fields={"tags": ", ".join(all_tags)} if all_tags else {}, + ) + + return test_case + + def _parse_steps(self, steps: List[Dict[str, Any]]) -> tuple: + """Parse Cucumber steps into TestRail step results + + Args: + steps: List of step objects from Cucumber JSON + + Returns: + Tuple of (list of TestRailSeparatedStep, overall_status_id) + """ + step_results = [] + overall_status = 1 # Passed by default + + for step in steps: + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + step_content = f"{keyword} {step_name}".strip() + + # Determine step status + result = step.get("result", {}) + result_status = result.get("status", "").lower() + + # Map Cucumber status to TestRail status ID + # 1=Passed, 3=Untested, 4=Skipped, 5=Failed + if result_status == "passed": + step_status_id = 1 + elif result_status == "failed": + step_status_id = 5 + overall_status = 5 # Test failed + elif result_status == "skipped": + step_status_id = 4 + if overall_status == 1: # Only update if not already failed + overall_status = 4 + elif result_status == "pending": + step_status_id = 3 + if overall_status == 1: + overall_status = 3 + elif result_status == "undefined": + step_status_id = 3 + if overall_status == 1: + overall_status = 3 + else: + step_status_id = 3 + + # Create step result + tr_step = TestRailSeparatedStep(content=step_content) + tr_step.status_id = step_status_id + step_results.append(tr_step) + + return step_results, overall_status + + def _calculate_elapsed_time(self, steps: List[Dict[str, Any]]) -> Optional[str]: + """Calculate total elapsed time from steps + + Args: + steps: List of step objects + + Returns: + Elapsed time string or None + """ + total_duration = 0 + for step in steps: + result = step.get("result", {}) + duration = result.get("duration", 0) + if duration: + total_duration += duration + + if total_duration > 0: + # Convert nanoseconds to seconds + total_seconds = total_duration / 1_000_000_000 + # Always return at least 1s if there was any duration + if total_seconds >= 1: + 
return f"{round(total_seconds)}s" + else: + return "1s" + + return None + + def _build_comment_from_failures(self, steps: List[Dict[str, Any]]) -> str: + """Build comment string from failed steps + + Args: + steps: List of step objects + + Returns: + Comment string describing failures + """ + failures = [] + for step in steps: + result = step.get("result", {}) + if result.get("status", "").lower() == "failed": + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + error_message = result.get("error_message", "") + + failure_text = f"Failed: {keyword} {step_name}" + if error_message: + failure_text += f"\n Error: {error_message}" + + failures.append(failure_text) + + return "\n\n".join(failures) if failures else "" + + def _extract_tags(self, tags: List[Dict[str, str]]) -> List[str]: + """Extract tag names from Cucumber tag objects + + Args: + tags: List of tag objects with 'name' field + + Returns: + List of tag name strings + """ + return [tag.get("name", "") for tag in tags if tag.get("name")] + + def _build_automation_id(self, feature_name: str, tags: List[str], scenario_name: str) -> str: + """Build automation ID from feature, tags, and scenario name + + Args: + feature_name: Feature name + tags: List of tags + scenario_name: Scenario name + + Returns: + Automation ID string + """ + parts = [feature_name] + + # Add tags if present + if tags: + parts.extend(tags) + + # Add scenario name + parts.append(scenario_name) + + return ".".join(parts) + + def generate_feature_file(self) -> str: + """Generate .feature file content from parsed Cucumber JSON + + This reconstructs Gherkin syntax from the Cucumber JSON results. + Useful for creating/updating BDD test cases in TestRail. + + Returns: + Feature file content as string + """ + with open(self.filepath, "r", encoding="utf-8") as f: + cucumber_data = json.load(f) + + if not isinstance(cucumber_data, list) or not cucumber_data: + return "" + + # Generate feature files (one per feature in JSON) + feature_files = [] + + for feature in cucumber_data: + feature_content = self._generate_feature_content(feature) + if feature_content: + feature_files.append(feature_content) + + return "\n\n".join(feature_files) + + def generate_scenario_gherkin(self, feature: Dict[str, Any], scenario: Dict[str, Any]) -> Tuple[str, List[str]]: + """Generate Gherkin content for a single scenario with feature context + + This creates a complete .feature file containing just one scenario, + including the feature header, tags, and description. 
+ + Args: + feature: Feature object from Cucumber JSON + scenario: Scenario object from Cucumber JSON + + Returns: + Tuple of (gherkin_content, all_tags) + - gherkin_content: Complete Gherkin .feature file for single scenario + - all_tags: List of all tags (feature + scenario) + """ + lines = [] + + # Collect all tags (feature + scenario) + feature_tags = self._extract_tags(feature.get("tags", [])) + scenario_tags = self._extract_tags(scenario.get("tags", [])) + all_tags = feature_tags + scenario_tags + + # Feature tags + if feature_tags: + lines.append(" ".join(feature_tags)) + + # Feature header + feature_name = feature.get("name", "Untitled Feature") + feature_description = feature.get("description", "") + + lines.append(f"Feature: {feature_name}") + if feature_description: + for desc_line in feature_description.split("\n"): + if desc_line.strip(): + lines.append(f" {desc_line.strip()}") + + lines.append("") # Empty line after feature header + + # Background (if exists in feature) - include for context + background = None + for element in feature.get("elements", []): + if element.get("type") == "background": + background = element + break + + if background: + background_content = self._generate_background_content(background) + if background_content: + lines.append(background_content) + lines.append("") + + # Scenario tags + if scenario_tags: + lines.append(" " + " ".join(scenario_tags)) + + # Scenario content + scenario_type = scenario.get("type", "scenario") + scenario_name = scenario.get("name", "Untitled Scenario") + + if scenario_type == "scenario_outline": + lines.append(f" Scenario Outline: {scenario_name}") + else: + lines.append(f" Scenario: {scenario_name}") + + # Steps + for step in scenario.get("steps", []): + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + lines.append(f" {keyword} {step_name}") + + # Examples table (for Scenario Outline) + if scenario_type == "scenario_outline": + examples = scenario.get("examples", []) + if examples: + for example_group in examples: + lines.append("") # Empty line before examples + + # Examples tags (if any) + example_tags = self._extract_tags(example_group.get("tags", [])) + if example_tags: + lines.append(" " + " ".join(example_tags)) + + # Examples keyword + lines.append(" Examples:") + + # Examples table + rows = example_group.get("rows", []) + if rows: + for row in rows: + cells = row.get("cells", []) + if cells: + row_content = " | ".join(cells) + lines.append(f" | {row_content} |") + + return "\n".join(lines), all_tags + + def _generate_feature_content(self, feature: Dict[str, Any]) -> str: + """Generate Gherkin feature content from Cucumber feature object + + Args: + feature: Feature object from Cucumber JSON + + Returns: + Gherkin feature content as string + """ + lines = [] + + # Feature tags + feature_tags = self._extract_tags(feature.get("tags", [])) + if feature_tags: + lines.append(" ".join(feature_tags)) + + # Feature header + feature_name = feature.get("name", "Untitled Feature") + feature_description = feature.get("description", "") + + lines.append(f"Feature: {feature_name}") + if feature_description: + for desc_line in feature_description.split("\n"): + if desc_line.strip(): + lines.append(f" {desc_line.strip()}") + + lines.append("") # Empty line after feature header + + # Process elements in order: Background first, then scenarios/rules + for element in feature.get("elements", []): + element_type = element.get("type", "") + + if element_type == "background": + background_content = 
self._generate_background_content(element) + if background_content: + lines.append(background_content) + lines.append("") # Empty line after background + + elif element_type in ("scenario", "scenario_outline"): + scenario_content = self._generate_scenario_content(element) + if scenario_content: + lines.append(scenario_content) + lines.append("") # Empty line between scenarios + + elif element_type == "rule": + rule_content = self._generate_rule_content(element) + if rule_content: + lines.append(rule_content) + lines.append("") # Empty line after rule + + return "\n".join(lines) + + def _generate_scenario_content(self, scenario: Dict[str, Any]) -> str: + """Generate Gherkin scenario content + + Args: + scenario: Scenario object from Cucumber JSON + + Returns: + Gherkin scenario content as string + """ + lines = [] + + # Scenario tags + scenario_tags = self._extract_tags(scenario.get("tags", [])) + if scenario_tags: + lines.append(" " + " ".join(scenario_tags)) + + # Scenario header + scenario_type = scenario.get("type", "scenario") + scenario_name = scenario.get("name", "Untitled Scenario") + + if scenario_type == "scenario_outline": + lines.append(f" Scenario Outline: {scenario_name}") + else: + lines.append(f" Scenario: {scenario_name}") + + # Steps + for step in scenario.get("steps", []): + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + lines.append(f" {keyword} {step_name}") + + # Examples table (for Scenario Outline) + if scenario_type == "scenario_outline": + examples = scenario.get("examples", []) + if examples: + for example_group in examples: + lines.append("") # Empty line before examples + + # Examples tags (if any) + example_tags = self._extract_tags(example_group.get("tags", [])) + if example_tags: + lines.append(" " + " ".join(example_tags)) + + # Examples keyword + lines.append(" Examples:") + + # Examples table + rows = example_group.get("rows", []) + if rows: + for row in rows: + cells = row.get("cells", []) + if cells: + row_content = " | ".join(cells) + lines.append(f" | {row_content} |") + + return "\n".join(lines) + + def _generate_background_content(self, background: Dict[str, Any]) -> str: + """Generate Gherkin background content + + Args: + background: Background object from Cucumber JSON + + Returns: + Gherkin background content as string + """ + lines = [] + + # Background header + background_name = background.get("name", "") + if background_name: + lines.append(f" Background: {background_name}") + else: + lines.append(" Background:") + + # Steps + for step in background.get("steps", []): + keyword = step.get("keyword", "").strip() + step_name = step.get("name", "") + lines.append(f" {keyword} {step_name}") + + return "\n".join(lines) + + def _generate_rule_content(self, rule: Dict[str, Any]) -> str: + """Generate Gherkin rule content + + Args: + rule: Rule object from Cucumber JSON + + Returns: + Gherkin rule content as string + """ + lines = [] + + # Rule tags (if any) + rule_tags = self._extract_tags(rule.get("tags", [])) + if rule_tags: + lines.append(" " + " ".join(rule_tags)) + + # Rule header + rule_name = rule.get("name", "Untitled Rule") + lines.append(f" Rule: {rule_name}") + + # Rule description (if any) + rule_description = rule.get("description", "") + if rule_description: + for desc_line in rule_description.split("\n"): + if desc_line.strip(): + lines.append(f" {desc_line.strip()}") + + # Process children in order: Background first, then scenarios + for element in rule.get("children", []): + element_type = 
element.get("type", "") + + if element_type == "background": + lines.append("") + background_content = self._generate_background_content(element) + # Indent background under rule + for line in background_content.split("\n"): + lines.append(" " + line if line else "") + + elif element_type in ("scenario", "scenario_outline"): + lines.append("") + scenario_content = self._generate_scenario_content(element) + # Indent scenario under rule + for line in scenario_content.split("\n"): + lines.append(" " + line if line else "") + + return "\n".join(lines) diff --git a/trcli/readers/gherkin_parser.py b/trcli/readers/gherkin_parser.py new file mode 100644 index 0000000..fe980b4 --- /dev/null +++ b/trcli/readers/gherkin_parser.py @@ -0,0 +1,268 @@ +from pathlib import Path +from beartype.typing import List, Dict, Any, Optional +from gherkin.parser import Parser +from gherkin.token_scanner import TokenScanner + +from trcli.cli import Environment +from trcli.data_classes.data_parsers import MatchersParser, TestRailCaseFieldsOptimizer +from trcli.data_classes.dataclass_testrail import ( + TestRailCase, + TestRailSuite, + TestRailSection, + TestRailProperty, + TestRailResult, + TestRailSeparatedStep, +) +from trcli.readers.file_parser import FileParser + + +class GherkinParser(FileParser): + """Parser for Gherkin .feature files""" + + def __init__(self, environment: Environment): + super().__init__(environment) + self.case_matcher = environment.case_matcher + + def parse_file(self) -> List[TestRailSuite]: + """Parse a Gherkin .feature file and convert to TestRailSuite structure""" + self.env.log(f"Parsing Gherkin feature file: {self.filename}") + + # Read and parse the feature file + with open(self.filepath, "r", encoding="utf-8") as f: + feature_text = f.read() + + parser = Parser() + scanner = TokenScanner(feature_text) + gherkin_document = parser.parse(scanner) + + # Extract feature + feature = gherkin_document.get("feature") + if not feature: + raise ValueError("No feature found in the Gherkin file") + + # Parse feature into TestRail structure + suite_name = self.env.suite_name if self.env.suite_name else feature.get("name", self.filepath.stem) + sections = self._parse_feature_children(feature) + + cases_count = sum(len(section.testcases) for section in sections) + self.env.log(f"Processed {cases_count} test cases in {len(sections)} sections.") + + testrail_suite = TestRailSuite( + name=suite_name, + testsections=sections, + source=self.filename, + ) + + return [testrail_suite] + + def _parse_feature_children(self, feature: Dict[str, Any]) -> List[TestRailSection]: + """Parse feature children (Background, Scenarios, Scenario Outlines) into sections""" + sections = [] + background_steps = None + + # First pass: extract background if present + for child in feature.get("children", []): + if "background" in child: + background_steps = self._extract_steps(child["background"]) + break + + # Group scenarios into a single section (using feature name) + feature_name = feature.get("name", "Feature") + section = TestRailSection(name=feature_name, testcases=[]) + + # Store background as section property if exists + if background_steps: + background_text = "\n".join([f"{step['keyword']}{step['text']}" for step in background_steps]) + section.properties = [TestRailProperty(name="background", value=background_text)] + + # Second pass: process scenarios + for child in feature.get("children", []): + if "scenario" in child: + scenario = child["scenario"] + # Check if it's a Scenario Outline + if 
scenario.get("keyword") == "Scenario Outline": + # Expand scenario outline into multiple test cases + test_cases = self._parse_scenario_outline(scenario, feature_name) + section.testcases.extend(test_cases) + else: + # Regular scenario + test_case = self._parse_scenario(scenario, feature_name) + if test_case: + section.testcases.append(test_case) + + if section.testcases: + sections.append(section) + + return sections + + def _parse_scenario(self, scenario: Dict[str, Any], feature_name: str) -> Optional[TestRailCase]: + """Parse a single Gherkin scenario into a TestRailCase""" + scenario_name = scenario.get("name", "Untitled Scenario") + tags = self._extract_tags(scenario) + steps = self._extract_steps(scenario) + + # Extract case ID if using name or property matcher + case_id = None + if self.case_matcher == MatchersParser.NAME: + case_id, scenario_name = MatchersParser.parse_name_with_id(scenario_name) + elif self.case_matcher == MatchersParser.PROPERTY: + # Look for @C tag pattern + for tag in tags: + if tag.startswith("@C") or tag.startswith("@c"): + try: + case_id = int(tag[2:]) + break + except ValueError: + pass + + # Create automation ID from feature, tags, and scenario name + # Format: "feature_name.@tag1.@tag2.scenario_name" + tag_part = ".".join(tags) if tags else "" + automation_id = f"{feature_name}.{tag_part}.{scenario_name}" if tag_part else f"{feature_name}.{scenario_name}" + + # Convert Gherkin steps to TestRail separated steps + step_results = [] + for step in steps: + step_content = f"{step['keyword']}{step['text']}" + tr_step = TestRailSeparatedStep(content=step_content) + tr_step.status_id = 3 # Untested by default + step_results.append(tr_step) + + # Create result object + result = TestRailResult( + case_id=case_id, + status_id=3, # Untested (no execution results yet) + comment=f"Gherkin scenario with {len(steps)} steps", + custom_step_results=step_results, + ) + + # Create test case + test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=case_id, + result=result, + custom_automation_id=automation_id, + case_fields={"tags": ", ".join(tags)} if tags else {}, + ) + + return test_case + + def _parse_scenario_outline(self, scenario_outline: Dict[str, Any], feature_name: str) -> List[TestRailCase]: + """Parse a Scenario Outline into multiple TestRailCases (one per example row)""" + test_cases = [] + outline_name = scenario_outline.get("name", "Untitled Outline") + tags = self._extract_tags(scenario_outline) + steps = self._extract_steps(scenario_outline) + examples = scenario_outline.get("examples", []) + + if not examples: + # No examples, treat as regular scenario + test_case = self._parse_scenario(scenario_outline, feature_name) + if test_case: + return [test_case] + + # Process each example table + for example_table in examples: + table_header = example_table.get("tableHeader", {}) + table_body = example_table.get("tableBody", []) + + # Get column names from header + header_cells = table_header.get("cells", []) + column_names = [cell.get("value", "") for cell in header_cells] + + # Create a test case for each row + for row_idx, row in enumerate(table_body, start=1): + row_cells = row.get("cells", []) + row_values = [cell.get("value", "") for cell in row_cells] + + # Create parameter mapping + params = dict(zip(column_names, row_values)) + + # Replace placeholders in scenario name + scenario_name = self._replace_placeholders(outline_name, params) + 
scenario_name = f"{outline_name} [Example {row_idx}]" + + # Replace placeholders in steps + instantiated_steps = [] + for step in steps: + step_text = self._replace_placeholders(step["text"], params) + instantiated_steps.append( + {"keyword": step["keyword"], "text": step_text, "keywordType": step.get("keywordType")} + ) + + # Create automation ID + tag_part = ".".join(tags) if tags else "" + automation_id = ( + f"{feature_name}.{tag_part}.{outline_name}.example_{row_idx}" + if tag_part + else f"{feature_name}.{outline_name}.example_{row_idx}" + ) + + # Convert steps to TestRail format + step_results = [] + for step in instantiated_steps: + step_content = f"{step['keyword']}{step['text']}" + tr_step = TestRailSeparatedStep(content=step_content) + tr_step.status_id = 3 # Untested + step_results.append(tr_step) + + # Create result + result = TestRailResult( + case_id=None, + status_id=3, + comment=f"Scenario Outline example {row_idx}: {params}", + custom_step_results=step_results, + ) + + # Create test case + test_case = TestRailCase( + title=TestRailCaseFieldsOptimizer.extract_last_words( + scenario_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), + case_id=None, + result=result, + custom_automation_id=automation_id, + case_fields=( + {"tags": ", ".join(tags), "example_params": str(params)} + if tags + else {"example_params": str(params)} + ), + ) + + test_cases.append(test_case) + + return test_cases + + @staticmethod + def _extract_tags(scenario: Dict[str, Any]) -> List[str]: + """Extract tags from a scenario""" + tags = [] + for tag in scenario.get("tags", []): + tag_name = tag.get("name", "") + if tag_name: + tags.append(tag_name) + return tags + + @staticmethod + def _extract_steps(scenario_or_background: Dict[str, Any]) -> List[Dict[str, Any]]: + """Extract steps from a scenario or background""" + steps = [] + for step in scenario_or_background.get("steps", []): + steps.append( + { + "keyword": step.get("keyword", ""), + "text": step.get("text", ""), + "keywordType": step.get("keywordType", ""), + } + ) + return steps + + @staticmethod + def _replace_placeholders(text: str, params: Dict[str, str]) -> str: + """Replace with actual values from params""" + result = text + for key, value in params.items(): + result = result.replace(f"<{key}>", value) + return result diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index 2218c31..e6f85a7 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -4,8 +4,7 @@ from unittest import TestCase, TestSuite from xml.etree import ElementTree as etree -from junitparser import ( - JUnitXml, JUnitXmlError, Element, Attr, TestSuite as JUnitTestSuite, TestCase as JUnitTestCase) +from junitparser import JUnitXml, JUnitXmlError, Element, Attr, TestSuite as JUnitTestSuite, TestCase as JUnitTestCase from trcli.cli import Environment from trcli.constants import OLD_SYSTEM_NAME_AUTOMATION_ID @@ -15,16 +14,12 @@ TestRailSuite, TestRailSection, TestRailProperty, - TestRailResult, TestRailSeparatedStep, + TestRailResult, + TestRailSeparatedStep, ) from trcli.readers.file_parser import FileParser -STEP_STATUSES = { - "passed": 1, - "untested": 3, - "skipped": 4, - "failed": 5 -} +STEP_STATUSES = {"passed": 1, "untested": 3, "skipped": 4, "failed": 5} TestCase.id = Attr("id") TestSuite.id = Attr("id") @@ -47,7 +42,7 @@ def __init__(self, environment: Environment): super().__init__(environment) self._case_matcher = environment.case_matcher self._special = environment.special_parser - self._case_result_statuses = 
{"passed": 1, "skipped": 4,"error": 5, "failure": 5} + self._case_result_statuses = {"passed": 1, "skipped": 4, "error": 5, "failure": 5} self._update_with_custom_statuses() @classmethod @@ -134,7 +129,7 @@ def _get_comment_for_case_result(case: JUnitTestCase) -> str: parts = [ f"Type: {result.type}" if result.type else "", f"Message: {result.message}" if result.message else "", - f"Text: {result.text}" if result.text else "" + f"Text: {result.text}" if result.text else "", ] return "\n".join(part for part in parts if part).strip() @@ -155,7 +150,7 @@ def _parse_case_properties(case): continue elif name.startswith("testrail_result_step"): - status, step = value.split(':', maxsplit=1) + status, step = value.split(":", maxsplit=1) step_obj = TestRailSeparatedStep(step.strip()) step_obj.status_id = STEP_STATUSES[status.lower().strip()] result_steps.append(step_obj) @@ -169,7 +164,7 @@ def _parse_case_properties(case): text = prop._elem.text.strip() if prop._elem.text else None field_value = text or value case_fields.append(field_value) - + # Extract refs for case updates if field_value and field_value.startswith("refs:"): case_refs = field_value[5:].strip() # Remove "refs:" prefix @@ -201,8 +196,9 @@ def _parse_test_cases(self, section) -> List[TestRailCase]: """ automation_id = f"{case.classname}.{case.name}" case_id, case_name = self._extract_case_id_and_name(case) - result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session = self._parse_case_properties( - case) + result_steps, attachments, result_fields, comments, case_fields, case_refs, sauce_session = ( + self._parse_case_properties(case) + ) result_fields_dict, case_fields_dict = self._resolve_case_fields(result_fields, case_fields) status_id = self._get_status_id_for_case_result(case) comment = self._get_comment_for_case_result(case) @@ -221,30 +217,31 @@ def _parse_test_cases(self, section) -> List[TestRailCase]: if sauce_session: result.prepend_comment(f"SauceLabs session: {sauce_session}") - automation_id = ( - case_fields_dict.pop(OLD_SYSTEM_NAME_AUTOMATION_ID, None) - or case._elem.get(OLD_SYSTEM_NAME_AUTOMATION_ID, automation_id)) + automation_id = case_fields_dict.pop(OLD_SYSTEM_NAME_AUTOMATION_ID, None) or case._elem.get( + OLD_SYSTEM_NAME_AUTOMATION_ID, automation_id + ) # Create TestRailCase kwargs case_kwargs = { - "title": TestRailCaseFieldsOptimizer.extract_last_words(case_name, - TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH), + "title": TestRailCaseFieldsOptimizer.extract_last_words( + case_name, TestRailCaseFieldsOptimizer.MAX_TESTCASE_TITLE_LENGTH + ), "case_id": case_id, "result": result, "custom_automation_id": automation_id, "case_fields": case_fields_dict, } - + # Only set refs field if case_refs has actual content if case_refs and case_refs.strip(): case_kwargs["refs"] = case_refs - + test_case = TestRailCase(**case_kwargs) - + # Store JUnit references as a temporary attribute for case updates (not serialized) if case_refs and case_refs.strip(): test_case._junit_case_refs = case_refs - + test_cases.append(test_case) return test_cases @@ -256,7 +253,6 @@ def _get_suite_name(self, suite): return suite.name raise ValueError("Suite name is not defined in environment or JUnit report.") - def _parse_sections(self, suite) -> List[TestRailSection]: sections = [] processed_props = [] @@ -272,16 +268,424 @@ def _parse_sections(self, suite) -> List[TestRailSection]: then sub_sections=sub_sections """ properties = self._extract_section_properties(section, processed_props) - test_cases = 
self._parse_test_cases(section) + + # BDD MODE: Group all scenarios under one test case + if self._is_bdd_mode(): + test_case = self._parse_bdd_feature_as_single_case(section) + test_cases = [test_case] if test_case else [] + # STANDARD MODE: One test case per JUnit testcase + else: + test_cases = self._parse_test_cases(section) + self.env.log(f"Processed {len(test_cases)} test cases in section {section.name}.") - sections.append(TestRailSection( - section.name, - testcases=test_cases, - properties=properties, - )) + sections.append( + TestRailSection( + section.name, + testcases=test_cases, + properties=properties, + ) + ) return sections + def _is_bdd_mode(self) -> bool: + """Check if BDD grouping mode is enabled + + Returns: + True if special parser is 'bdd', False otherwise + """ + return self._special == "bdd" + + def _extract_feature_case_id_from_property(self, testsuite) -> Union[int, None]: + """Extract case ID from testsuite-level properties + + Looks for properties: testrail_case_id, test_id, bdd_case_id + + Args: + testsuite: JUnit testsuite element + + Returns: + Case ID as integer or None if not found + """ + for prop in testsuite.properties(): + if prop.name in ["testrail_case_id", "test_id", "bdd_case_id"]: + case_id_str = prop.value.lower().replace("c", "") + if case_id_str.isnumeric(): + self.env.vlog(f"BDD: Found case ID C{case_id_str} in testsuite property '{prop.name}'") + return int(case_id_str) + return None + + def _extract_case_id_from_testcases(self, testsuite) -> List[tuple]: + """Extract case IDs from testcase properties and names + + Args: + testsuite: JUnit testsuite element + + Returns: + List of tuples (testcase_name, case_id) + """ + testcase_case_ids = [] + + for testcase in testsuite: + tc_case_id = None + + # Check testcase properties first + for case_props in testcase.iterchildren(Properties): + for prop in case_props.iterchildren(Property): + if prop.name == "test_id": + tc_case_id_str = prop.value.lower().replace("c", "") + if tc_case_id_str.isnumeric(): + tc_case_id = int(tc_case_id_str) + break + + # Check testcase name if property not found + if not tc_case_id: + tc_case_id, _ = MatchersParser.parse_name_with_id(testcase.name) + + if tc_case_id: + testcase_case_ids.append((testcase.name, tc_case_id)) + + return testcase_case_ids + + def _extract_and_validate_bdd_case_id(self, testsuite) -> tuple: + """Extract case ID from various sources and validate consistency + + In BDD mode, all scenarios in a feature MUST share the same case ID. + + Priority order: + 1. Testsuite-level property (testrail_case_id, test_id, bdd_case_id) + 2. Testcase properties (all must be same) + 3. Testcase names (all must be same) + 4. 
Testsuite name pattern [C123] + + Args: + testsuite: JUnit testsuite element + + Returns: + Tuple of (case_id: int or None, validation_errors: List[str]) + """ + validation_errors = [] + + # Priority 1: Testsuite-level property + case_id = self._extract_feature_case_id_from_property(testsuite) + if case_id: + return case_id, [] + + # Priority 2 & 3: Check testcase properties and names + testcase_case_ids = self._extract_case_id_from_testcases(testsuite) + + if not testcase_case_ids: + validation_errors.append( + f"BDD Error: No case ID found for feature '{testsuite.name}'.\n" + f" Add case ID using one of:\n" + f" - Testsuite property: \n" + f" - Testcase names: 'Scenario name C42'\n" + f" - Testcase property: " + ) + return None, validation_errors + + # Check consistency - all must be the same + unique_case_ids = set(cid for _, cid in testcase_case_ids) + + if len(unique_case_ids) > 1: + validation_errors.append( + f"BDD Error: Multiple different case IDs found in feature '{testsuite.name}'.\n" + f" In BDD mode, all scenarios must map to the SAME TestRail case.\n" + f" Found case IDs: {sorted(unique_case_ids)}\n" + f" Scenarios:\n" + + "\n".join(f" - '{name}' → C{cid}" for name, cid in testcase_case_ids) + + f"\n\n If these should be separate test cases, remove --special-parser bdd flag." + ) + return None, validation_errors + + case_id = testcase_case_ids[0][1] + self.env.vlog( + f"BDD: Found consistent case ID C{case_id} across {len(testcase_case_ids)} scenario(s) " + f"in feature '{testsuite.name}'" + ) + + # Priority 4: Check testsuite name if no testcase IDs found + if not case_id and self._case_matcher == MatchersParser.NAME: + case_id, _ = MatchersParser.parse_name_with_id(testsuite.name) + if case_id: + self.env.vlog(f"BDD: Found case ID C{case_id} in testsuite name") + + return case_id, [] + + def _validate_bdd_case_exists(self, case_id: int, feature_name: str) -> tuple: + """Validate that case exists in TestRail AND is a BDD test case + + A valid BDD test case MUST have: + - Exist in TestRail (case ID is valid) + - Have custom_testrail_bdd_scenario field with content + + Args: + case_id: TestRail case ID to validate + feature_name: Feature/testsuite name for error context + + Returns: + Tuple of (is_valid: bool, error_message: str, case_data: dict) + """ + try: + # Import here to avoid circular dependency + from trcli.api.api_request_handler import ApiRequestHandler + from trcli.api.project_based_client import ProjectBasedClient + from trcli.data_classes.dataclass_testrail import TestRailSuite + + # Get API client + temp_suite = TestRailSuite(name="temp", suite_id=1) + project_client = ProjectBasedClient(environment=self.env, suite=temp_suite) + api_handler = project_client.api_request_handler + + # Step 1: Get case from TestRail + response = api_handler.client.send_get(f"get_case/{case_id}") + + if response.error_message: + return ( + False, + ( + f"BDD Validation Error: Case C{case_id} does not exist in TestRail.\n" + f"Feature: '{feature_name}'\n" + f"API Error: {response.error_message}\n\n" + f"Action Required:\n" + f" 1. Verify case C{case_id} exists in TestRail\n" + f" 2. Ensure you have permission to access this case\n" + f" 3. 
Create the BDD test case if it doesn't exist:\n" + f" trcli import_gherkin -f {feature_name}.feature --section-id " + ), + {}, + ) + + case_data = response.response_text + + # Step 2: Validate it's a BDD test case + bdd_scenario_field = case_data.get("custom_testrail_bdd_scenario") + + if not bdd_scenario_field: + return ( + False, + ( + f"BDD Validation Error: Case C{case_id} is NOT a BDD test case.\n" + f"Feature: '{feature_name}'\n" + f"Case Title: '{case_data.get('title', 'Unknown')}'\n\n" + f"Reason: The 'custom_testrail_bdd_scenario' field is empty or null.\n" + f"This indicates the case is using a regular template, not the BDD template.\n\n" + f"Action Required:\n" + f" Option 1: Upload this case using standard mode (remove --special-parser bdd)\n" + f" Option 2: Create a proper BDD test case with:\n" + f" trcli import_gherkin -f {feature_name}.feature --section-id \n" + f" Option 3: Convert existing case to BDD template in TestRail UI" + ), + case_data, + ) + + # Success! + self.env.vlog( + f"BDD: Validated case C{case_id} is a valid BDD test case\n" + f" - Title: '{case_data.get('title')}'\n" + f" - Template ID: {case_data.get('template_id')}\n" + f" - Has BDD scenarios: Yes" + ) + + return True, "", case_data + + except Exception as e: + return ( + False, + ( + f"BDD Validation Error: Unable to validate case C{case_id}.\n" + f"Feature: '{feature_name}'\n" + f"Error: {str(e)}\n\n" + f"Action Required: Verify your TestRail connection and case access permissions." + ), + {}, + ) + + def _aggregate_scenario_statuses(self, scenario_statuses: List[int]) -> int: + """Aggregate scenario statuses using fail-fast logic + + Fail-fast logic: + - If ANY scenario is Failed (5) → Feature is Failed (5) + - Else if ANY scenario is Skipped (4) → Feature is Skipped (4) + - Else if ALL scenarios Passed (1) → Feature is Passed (1) + + Args: + scenario_statuses: List of TestRail status IDs + + Returns: + Aggregated status ID + """ + if 5 in scenario_statuses: # Any failure + return 5 + elif 4 in scenario_statuses: # Any skipped (no failures) + return 4 + else: # All passed + return 1 + + def _format_failure_message(self, scenario_name: str, result_obj) -> str: + """Format failure details for comment + + Args: + scenario_name: Clean scenario name + result_obj: JUnit result object (failure/error element) + + Returns: + Formatted failure message + """ + lines = [f"Scenario: {scenario_name}"] + + if result_obj.type: + lines.append(f" Type: {result_obj.type}") + + if result_obj.message: + lines.append(f" Message: {result_obj.message}") + + if result_obj.text: + # Truncate if too long + text = result_obj.text.strip() + if len(text) > 500: + text = text[:500] + "\n ... (truncated)" + lines.append(f" Details:\n {text}") + + return "\n".join(lines) + + def _parse_bdd_feature_as_single_case(self, testsuite) -> Union[TestRailCase, None]: + """Parse all scenarios in a testsuite as a single BDD test case + + Enhanced validation: + 1. Case ID exists + 2. All scenarios have same case ID + 3. Case exists in TestRail + 4. 
Case is actually a BDD test case (has custom_testrail_bdd_scenario) + + Args: + testsuite: JUnit testsuite containing multiple scenarios + + Returns: + Single TestRailCase with aggregated scenario results, or None if validation fails + """ + feature_name = testsuite.name + + # Step 1: Extract and validate case ID consistency + case_id, validation_errors = self._extract_and_validate_bdd_case_id(testsuite) + + if validation_errors: + for error in validation_errors: + self.env.elog(error) + return None + + if not case_id: + self.env.elog(f"BDD Error: No valid case ID found for feature '{feature_name}'. " f"Skipping this feature.") + return None + + # Step 2: Validate case exists AND is a BDD case + is_valid, error_message, case_data = self._validate_bdd_case_exists(case_id, feature_name) + + if not is_valid: + self.env.elog(error_message) + # Raise exception to stop processing + from trcli.data_classes.validation_exception import ValidationException + + raise ValidationException( + field_name="case_id", + class_name="BDD Feature", + reason=f"Case C{case_id} validation failed. See error above for details.", + ) + + self.env.log(f"BDD: Case C{case_id} validated as BDD test case for feature '{feature_name}'") + + # Step 3: Parse all scenarios + scenarios = [] + scenario_statuses = [] + total_time = 0 + failure_messages = [] + + for idx, testcase in enumerate(testsuite, 1): + scenario_name = testcase.name + # Clean case ID from name + _, clean_scenario_name = MatchersParser.parse_name_with_id(scenario_name) + if not clean_scenario_name: + clean_scenario_name = scenario_name + + scenario_time = float(testcase.time or 0) + total_time += scenario_time + + # Determine scenario status + if testcase.is_passed: + scenario_status = 1 + scenario_status_label = "PASSED" + elif testcase.is_skipped: + scenario_status = 4 + scenario_status_label = "SKIPPED" + else: # Failed + scenario_status = 5 + scenario_status_label = "FAILED" + + # Capture failure details + if testcase.result: + result_obj = testcase.result[0] + error_msg = self._format_failure_message(clean_scenario_name, result_obj) + failure_messages.append(error_msg) + + # Track status for aggregation + scenario_statuses.append(scenario_status) + + # Create step result for this scenario + step = TestRailSeparatedStep(content=f"Scenario {idx}: {clean_scenario_name}") + step.status_id = scenario_status + scenarios.append(step) + + self.env.vlog(f" - Scenario {idx}: {clean_scenario_name} → {scenario_status_label} " f"({scenario_time}s)") + + # Step 4: Aggregate statuses + overall_status = self._aggregate_scenario_statuses(scenario_statuses) + + status_labels = {1: "PASSED", 4: "SKIPPED", 5: "FAILED"} + overall_status_label = status_labels.get(overall_status, "UNKNOWN") + + # Step 5: Create comment with summary + passed_count = scenario_statuses.count(1) + failed_count = scenario_statuses.count(5) + skipped_count = scenario_statuses.count(4) + total_count = len(scenario_statuses) + + summary = ( + f"Feature Summary:\n" + f" Total Scenarios: {total_count}\n" + f" Passed: {passed_count}\n" + f" Failed: {failed_count}\n" + f" Skipped: {skipped_count}\n" + ) + + if failure_messages: + comment = f"{summary}\n{'='*50}\nFailure Details:\n\n" + "\n\n".join(failure_messages) + else: + comment = summary + + # Step 6: Create aggregated result + result = TestRailResult( + case_id=case_id, + status_id=overall_status, + elapsed=total_time if total_time > 0 else None, # Pass numeric value, not formatted string + custom_step_results=scenarios, + comment=comment, + ) + + 
# Step 7: Create test case + test_case = TestRailCase( + title=feature_name, + case_id=case_id, + result=result, + ) + + self.env.log( + f"BDD: Grouped {total_count} scenario(s) under case C{case_id} " + f"'{feature_name}' → {overall_status_label}" + ) + self.env.log(f" Breakdown: {passed_count} passed, {failed_count} failed, " f"{skipped_count} skipped") + + return test_case + def parse_file(self) -> List[TestRailSuite]: self.env.log("Parsing JUnit report.") suite = JUnitXml.fromfile(self.filepath, parse_func=self._add_root_element_to_tree) @@ -296,11 +700,13 @@ def parse_file(self) -> List[TestRailSuite]: testrail_sections = self._parse_sections(suite) suite_name = self.env.suite_name if self.env.suite_name else suite.name - testrail_suites.append(TestRailSuite( - suite_name, - testsections=testrail_sections, - source=self.filename, - )) + testrail_suites.append( + TestRailSuite( + suite_name, + testsections=testrail_sections, + source=self.filename, + ) + ) return testrail_suites @@ -310,9 +716,9 @@ def _split_sauce_report(self, suite) -> List[JUnitXml]: for section in suite: if not len(section): continue - divider_index = section.name.find('-') + divider_index = section.name.find("-") subsuite_name = section.name[:divider_index].strip() - section.name = section.name[divider_index + 1:].strip() + section.name = section.name[divider_index + 1 :].strip() new_xml = JUnitXml(subsuite_name) if subsuite_name not in subsuites.keys(): subsuites[subsuite_name] = new_xml @@ -344,5 +750,6 @@ def _split_sauce_report(self, suite) -> List[JUnitXml]: return [v for k, v in subsuites.items()] -if __name__ == '__main__': + +if __name__ == "__main__": pass diff --git a/trcli/settings.py b/trcli/settings.py index 7cd59dc..829af7d 100644 --- a/trcli/settings.py +++ b/trcli/settings.py @@ -1,6 +1,8 @@ MAX_WORKERS_ADD_CASE = 10 -MAX_WORKERS_ADD_RESULTS = 10 -DEFAULT_API_CALL_RETRIES = 3 -DEFAULT_API_CALL_TIMEOUT = 30 +MAX_WORKERS_ADD_RESULTS = 20 +DEFAULT_API_CALL_RETRIES = 5 +DEFAULT_API_CALL_TIMEOUT = 60 DEFAULT_BATCH_SIZE = 50 ALLOW_ELAPSED_MS = False +ENABLE_PARALLEL_PAGINATION = False +MAX_WORKERS_PARALLEL_PAGINATION = 10 diff --git a/verify_gherkin_parsing.py b/verify_gherkin_parsing.py new file mode 100644 index 0000000..51db0e2 --- /dev/null +++ b/verify_gherkin_parsing.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Verification script for gherkin-official library parsing capabilities. +This script tests the parsing of .feature files and displays the parsed structure. 
+""" + +import json +from pathlib import Path +from gherkin.parser import Parser +from gherkin.token_scanner import TokenScanner + + +def parse_feature_file(feature_path: Path): + """Parse a Gherkin .feature file and return the parsed document.""" + print(f"\n{'='*80}") + print(f"Parsing: {feature_path.name}") + print(f"{'='*80}\n") + + try: + # Read the feature file + with open(feature_path, "r", encoding="utf-8") as f: + feature_text = f.read() + + # Parse using gherkin-official + parser = Parser() + token_scanner = TokenScanner(feature_text) + gherkin_document = parser.parse(token_scanner) + + # Display parsed structure + print("✓ Successfully parsed feature file!\n") + + # Extract key information + feature = gherkin_document.get("feature") + if feature: + print(f"Feature Name: {feature.get('name')}") + print(f"Description: {feature.get('description', 'N/A')}") + print(f"Language: {feature.get('language', 'en')}") + print(f"Tags: {[tag['name'] for tag in feature.get('tags', [])]}") + + # Count scenarios + scenarios = [child for child in feature.get("children", []) if child.get("scenario")] + scenario_outlines = [child for child in feature.get("children", []) if child.get("scenarioOutline")] + background = [child for child in feature.get("children", []) if child.get("background")] + + print(f"\nStructure:") + print(f" - Background: {len(background)}") + print(f" - Scenarios: {len(scenarios)}") + print(f" - Scenario Outlines: {len(scenario_outlines)}") + + # Display scenarios + print(f"\nScenarios Found:") + for idx, child in enumerate(feature.get("children", []), 1): + if child.get("scenario"): + scenario = child["scenario"] + tags = [tag["name"] for tag in scenario.get("tags", [])] + steps = scenario.get("steps", []) + print(f" {idx}. {scenario.get('name')} (Tags: {tags})") + print(f" Steps: {len(steps)}") + elif child.get("scenarioOutline"): + outline = child["scenarioOutline"] + tags = [tag["name"] for tag in outline.get("tags", [])] + examples = outline.get("examples", []) + print(f" {idx}. {outline.get('name')} (Outline, Tags: {tags})") + print(f" Examples rows: {len(examples[0].get('tableBody', [])) if examples else 0}") + + # Display full parsed document (formatted JSON) + print(f"\n{'-'*80}") + print("Full Parsed Document (JSON):") + print(f"{'-'*80}") + print(json.dumps(gherkin_document, indent=2)) + + return gherkin_document + + except Exception as e: + print(f"✗ Error parsing feature file: {e}") + raise + + +def main(): + """Main function to test gherkin parsing.""" + print("\n" + "=" * 80) + print("GHERKIN-OFFICIAL LIBRARY VERIFICATION") + print("=" * 80) + + # Test with the sample login feature + feature_path = Path(__file__).parent / "tests" / "test_data" / "FEATURE" / "sample_login.feature" + + if not feature_path.exists(): + print(f"\n✗ Feature file not found: {feature_path}") + return 1 + + try: + gherkin_doc = parse_feature_file(feature_path) + + print(f"\n{'='*80}") + print("✓ VERIFICATION SUCCESSFUL!") + print(f"{'='*80}") + print("\nKey Findings:") + print(" - gherkin-official library is working correctly") + print(" - Feature files can be parsed successfully") + print(" - Scenarios, steps, tags, and examples are extracted properly") + print(" - Ready for integration into TRCLI parser") + + return 0 + + except Exception as e: + print(f"\n{'='*80}") + print("✗ VERIFICATION FAILED!") + print(f"{'='*80}") + print(f"\nError: {e}") + return 1 + + +if __name__ == "__main__": + exit(main())
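
For context, the BDD handling added above rests on two small rules that the patch states in its own code: Cucumber/JUnit statuses map onto TestRail status IDs (1=Passed, 3=Untested, 4=Skipped, 5=Failed), and per-scenario statuses are rolled up fail-fast into one feature-level result. The following is a minimal, self-contained sketch of those rules; the names here are illustrative only and are not the patch's own identifiers (the patch uses STEP_STATUSES and JunitParser._aggregate_scenario_statuses).

from typing import List

# TestRail status IDs used throughout the patch:
# 1 = Passed, 3 = Untested, 4 = Skipped, 5 = Failed
STEP_STATUS_IDS = {
    "passed": 1,
    "failed": 5,
    "skipped": 4,
    "pending": 3,
    "undefined": 3,
}


def aggregate_scenario_statuses(scenario_statuses: List[int]) -> int:
    """Fail-fast roll-up: any failure wins, then any skip, otherwise passed."""
    if 5 in scenario_statuses:
        return 5
    if 4 in scenario_statuses:
        return 4
    return 1


if __name__ == "__main__":
    # One failed scenario marks the whole feature failed, even if the rest passed.
    statuses = [STEP_STATUS_IDS[s] for s in ("passed", "failed", "passed")]
    assert aggregate_scenario_statuses(statuses) == 5
    # With no failures, a single skipped scenario marks the feature skipped.
    assert aggregate_scenario_statuses([1, 4, 1]) == 4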
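
The scenario-outline expansion in the Gherkin reader likewise reduces to substituting <column> placeholders in each step with the values of one Examples row, producing one test case per row. A small sketch of that substitution follows; the function and sample data are illustrative (the patch's own helper is GherkinParser._replace_placeholders).

from typing import Dict


def replace_placeholders(text: str, params: Dict[str, str]) -> str:
    """Swap each <name> token for the matching Examples-row value."""
    for key, value in params.items():
        text = text.replace(f"<{key}>", value)
    return text


if __name__ == "__main__":
    # One Examples row: | username | result  |
    #                   | alice    | success |
    params = dict(zip(["username", "result"], ["alice", "success"]))
    step = "When I log in as <username> then I see <result>"
    assert replace_placeholders(step, params) == "When I log in as alice then I see success"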