diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6acb220..6be3bf2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ on: paths-ignore: - '.github/**' - CHANGELOG.md - - README.rst + - README.md - CONTRIBUTING.rst env: @@ -41,7 +41,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.10' - name: Install dependencies run: python -m pip install --upgrade pip setuptools wheel diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 234988f..779affa 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [ '3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ] + python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ] steps: - name: Checkout repository uses: actions/checkout@v4 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2c83d96..8b2adf5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,27 +1,32 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files -- repo: https://github.com/PyCQA/pydocstyle - rev: 6.0.0 + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - repo: https://github.com/PyCQA/pydocstyle + rev: 6.3.0 hooks: - - id: pydocstyle + - id: pydocstyle exclude: | - (?x)^( - tests/.* | - examples/.* - ) -- repo: https://github.com/Lucas-C/pre-commit-hooks-markup - rev: v1.0.1 + (?x)^( + tests/.* | + examples/.* + ) + - repo: https://github.com/psf/black + rev: 24.10.0 + hooks: + - id: black + args: [ '--check', 'pytest_reportportal', 'tests' ] + - repo: https://github.com/pycqa/isort + rev: 6.0.0 hooks: - - id: rst-linter -- repo: https://github.com/pycqa/flake8 - rev: 5.0.4 + - id: isort + - repo: https://github.com/pycqa/flake8 + rev: 7.1.1 hooks: - - id: flake8 + - id: flake8 diff --git a/CHANGELOG.md b/CHANGELOG.md index 5125920..675bbc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,14 @@ # Changelog ## [Unreleased] +### Removed +- `Python 3.7` support, by @HardNorth + +## [5.4.7] ### Added -- Escaping of binary symbol '\0' in parameters, by @HardNorth +- Issue [#382](https://github.com/reportportal/agent-python-pytest/issues/382): Escaping of binary symbol '\0' in parameters, by @HardNorth +### Changed +- Client version updated on [5.6.0](https://github.com/reportportal/client-Python/releases/tag/5.6.0), by @HardNorth ## [5.4.6] ### Added diff --git a/README.md b/README.md new file mode 100644 index 0000000..d4d7928 --- /dev/null +++ b/README.md @@ -0,0 +1,122 @@ +# ReportPortal integration for pytest framework + +Pytest plugin for reporting test results of the Pytest to the ReportPortal. + +> **DISCLAIMER**: We use Google Analytics for sending anonymous usage information such as agent's and client's names, +> and their versions after a successful launch start. This information might help us to improve both ReportPortal +> backend and client sides. It is used by the ReportPortal team only and is not supposed for sharing with 3rd parties. 
+
+[![PyPI](https://img.shields.io/pypi/v/pytest-reportportal.svg?maxAge=259200)](https://pypi.python.org/pypi/pytest-reportportal)
+[![Python versions](https://img.shields.io/pypi/pyversions/pytest-reportportal.svg)](https://pypi.org/project/pytest-reportportal)
+[![Tests](https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml/badge.svg)](https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml)
+[![codecov](https://codecov.io/gh/reportportal/agent-python-pytest/graph/badge.svg?token=x5ZHqZKJFV)](https://codecov.io/gh/reportportal/agent-python-pytest)
+[![Join Slack chat!](https://img.shields.io/badge/slack-join-brightgreen.svg)](https://slack.epmrpp.reportportal.io/)
+[![stackoverflow](https://img.shields.io/badge/reportportal-stackoverflow-orange.svg?style=flat)](http://stackoverflow.com/questions/tagged/reportportal)
+[![Build with Love](https://img.shields.io/badge/build%20with-❤%EF%B8%8F%E2%80%8D-lightgrey.svg)](http://reportportal.io?style=flat)
+
+## Installation
+
+To install the pytest plugin, run the following command in a terminal:
+
+```bash
+pip install pytest-reportportal
+```
+
+See `CONTRIBUTING.rst` for contribution guidelines.
+
+## Configuration
+
+Prepare the config file `pytest.ini` in the root directory of your tests, or specify a custom one with the pytest command line option:
+
+```bash
+py.test -c config.cfg
+```
+
+The `pytest.ini` file must contain the following mandatory fields:
+
+- `rp_api_key` - the value can be found in the User Profile section
+- `rp_project` - the name of the project in ReportPortal
+- `rp_endpoint` - the address of the ReportPortal server
+
+Example of `pytest.ini`:
+
+```text
+[pytest]
+rp_api_key = fb586627-32be-47dd-93c1-678873458a5f
+rp_endpoint = http://192.168.1.10:8080
+rp_project = user_personal
+rp_launch = AnyLaunchName
+rp_launch_attributes = 'PyTest' 'Smoke'
+rp_launch_description = 'Smoke test'
+rp_ignore_attributes = 'xfail' 'usefixture'
+```
+
+- The `rp_api_key` can also be set with the environment variable `RP_API_KEY`. This overrides the value set for `rp_api_key` in `pytest.ini`.
+
+There are also optional parameters, described in the documentation:
+https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/
+
+## Examples
+
+To log the test item flow to ReportPortal, use the Python logging handler provided by the plugin, as shown
+below:
+
+in `conftest.py`:
+
+```python
+import logging
+
+import pytest
+
+from reportportal_client import RPLogger
+
+
+@pytest.fixture(scope="session")
+def rp_logger():
+    logger = logging.getLogger(__name__)
+    logger.setLevel(logging.DEBUG)
+    logging.setLoggerClass(RPLogger)
+    return logger
+```
+
+in tests:
+
+```python
+# In this case only INFO messages will be sent to ReportPortal.
+def test_one(rp_logger):
+    rp_logger.info("Case1. Step1")
+    x = "this"
+    rp_logger.info("x is: %s", x)
+    assert 'h' in x
+
+    # Message with an attachment.
+    import subprocess
+    free_memory = subprocess.check_output("free -h".split())
+    rp_logger.info(
+        "Case1. Memory consumption",
+        attachment={
+            "name": "free_memory.txt",
+            "data": free_memory,
+            "mime": "application/octet-stream",
+        },
+    )
+
+    # This debug message will not be sent to ReportPortal.
+    rp_logger.debug("Case1. 
Debug message") +``` + +## Launching + +To run test with ReportPortal you must provide `--reportportal` flag: + +```bash +py.test ./tests --reportportal +``` + +Check the documentation to find more detailed information about how to integrate pytest with ReportPortal using the +agent: +https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/ + +## Copyright Notice + +Licensed under the [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) license (see the LICENSE file). diff --git a/README.rst b/README.rst deleted file mode 100644 index 4402c3c..0000000 --- a/README.rst +++ /dev/null @@ -1,141 +0,0 @@ -=================== -agent-python-pytest -=================== - -.. image:: https://img.shields.io/pypi/v/pytest-reportportal.svg - :target: https://pypi.python.org/pypi/pytest-reportportal - :alt: Latest Version -.. image:: https://img.shields.io/pypi/pyversions/pytest-reportportal.svg - :target: https://pypi.org/project/pytest-reportportal - :alt: Supported python versions -.. image:: https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml/badge.svg - :target: https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml - :alt: Test status -.. image:: https://codecov.io/gh/reportportal/agent-python-pytest/branch/develop/graph/badge.svg - :target: https://codecov.io/gh/reportportal/agent-python-pytest - :alt: Test coverage -.. image:: https://img.shields.io/badge/slack-join-brightgreen.svg - :target: https://slack.epmrpp.reportportal.io/ - :alt: Join Slack chat! - - -Pytest plugin for reporting test results of the Pytest to the ReportPortal. - -Installation -~~~~~~~~~~~~ - -To install pytest plugin execute next command in a terminal: - -.. code-block:: bash - - pip install pytest-reportportal - - - -Look through the CONTRIBUTING.rst for contribution guidelines. - -Configuration -~~~~~~~~~~~~~ - -Prepare the config file :code:`pytest.ini` in root directory of tests or specify -any one using pytest command line option: - -.. code-block:: bash - - py.test -c config.cfg - - -The :code:`pytest.ini` file should have next mandatory fields: - -- :code:`rp_api_key` - value could be found in the User Profile section -- :code:`rp_project` - name of project in ReportPortal -- :code:`rp_endpoint` - address of ReportPortal Server - -Example of :code:`pytest.ini`: - -.. code-block:: text - - [pytest] - rp_api_key = fb586627-32be-47dd-93c1-678873458a5f - rp_endpoint = http://192.168.1.10:8080 - rp_project = user_personal - rp_launch = AnyLaunchName - rp_launch_attributes = 'PyTest' 'Smoke' - rp_launch_description = 'Smoke test' - rp_ignore_attributes = 'xfail' 'usefixture' - -- The :code:`rp_api_key` can also be set with the environment variable `RP_API_KEY`. This will override the value set for :code:`rp_api_key` in pytest.ini - -There are also optional parameters: -https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/ - -Examples -~~~~~~~~ - -For logging of the test item flow to ReportPortal, please, use the python -logging handler provided by plugin like bellow: - -in conftest.py: - -.. code-block:: python - - import logging - import sys - - import pytest - - from reportportal_client import RPLogger - - - @pytest.fixture(scope="session") - def rp_logger(): - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - logging.setLoggerClass(RPLogger) - return logger - -in tests: - -.. 
code-block:: python - - # In this case only INFO messages will be sent to the ReportPortal. - def test_one(rp_logger): - rp_logger.info("Case1. Step1") - x = "this" - rp_logger.info("x is: %s", x) - assert 'h' in x - - # Message with an attachment. - import subprocess - free_memory = subprocess.check_output("free -h".split()) - rp_logger.info( - "Case1. Memory consumption", - attachment={ - "name": "free_memory.txt", - "data": free_memory, - "mime": "application/octet-stream", - }, - ) - - # This debug message will not be sent to the ReportPortal. - rp_logger.debug("Case1. Debug message") - -Launching -~~~~~~~~~ - -To run test with ReportPortal you must provide '--reportportal' flag: - -.. code-block:: bash - - py.test ./tests --reportportal - -Check the documentation to find more detailed information about how to integrate pytest with ReportPortal using an agent: -https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/ - -Copyright Notice ----------------- -.. Copyright Notice: https://github.com/reportportal/agent-python-pytest#copyright-notice - -Licensed under the `Apache 2.0`_ license (see the LICENSE file). - -.. _Apache 2.0: https://www.apache.org/licenses/LICENSE-2.0 diff --git a/examples/attributes/test_runtime_attribute.py b/examples/attributes/test_runtime_attribute.py index 45ecd22..55fad91 100644 --- a/examples/attributes/test_runtime_attribute.py +++ b/examples/attributes/test_runtime_attribute.py @@ -20,7 +20,5 @@ def test_custom_attributes_report(request): This is a test with one custom marker as a decorator and one custom marker added at runtime which shall both appear on ReportPortal on test's item """ - request.node.add_marker( - pytest.mark.runtime() - ) + request.node.add_marker(pytest.mark.runtime()) assert True diff --git a/examples/bdd/features/arguments_four_steps.feature b/examples/bdd/features/arguments_four_steps.feature new file mode 100644 index 0000000..a71ab7c --- /dev/null +++ b/examples/bdd/features/arguments_four_steps.feature @@ -0,0 +1,8 @@ +Feature: Four step arguments + Scenario: Arguments for given, when, and, then + Given there are 5 cucumbers + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/features/arguments_four_steps_description.feature b/examples/bdd/features/arguments_four_steps_description.feature new file mode 100644 index 0000000..9d4285c --- /dev/null +++ b/examples/bdd/features/arguments_four_steps_description.feature @@ -0,0 +1,11 @@ +Feature: Four step arguments + Description for the feature + + Scenario: Arguments for given, when, and, then + Description for the scenario + + Given there are 5 cucumbers + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/features/arguments_four_steps_docstring.feature b/examples/bdd/features/arguments_four_steps_docstring.feature new file mode 100644 index 0000000..de6956d --- /dev/null +++ b/examples/bdd/features/arguments_four_steps_docstring.feature @@ -0,0 +1,15 @@ +Feature: Four step arguments + Description for the feature + + Scenario: Arguments for given, when, and, then + Description for the scenario + + Given there are 5 cucumbers + """ + Docstring for the step + """ + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/features/arguments_three_steps.feature b/examples/bdd/features/arguments_three_steps.feature new file mode 100644 index 0000000..da740d7 --- /dev/null +++ 
b/examples/bdd/features/arguments_three_steps.feature @@ -0,0 +1,7 @@ +Feature: Three step arguments + Scenario: Arguments for given, when, then, less steps + Given there are 5 cucumbers + + When I eat 5 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/features/arguments_two_scenarios.feature b/examples/bdd/features/arguments_two_scenarios.feature new file mode 100644 index 0000000..63d0221 --- /dev/null +++ b/examples/bdd/features/arguments_two_scenarios.feature @@ -0,0 +1,15 @@ +Feature: Two scenarios step arguments + Scenario: Arguments for given, when, then + Given there are 5 cucumbers + + When I eat 5 cucumbers + + Then I should have 0 cucumbers + + Scenario: Arguments for given, when, and, then + Given there are 5 cucumbers + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/features/background_scenario.feature b/examples/bdd/features/background_scenario.feature new file mode 100644 index 0000000..c67e488 --- /dev/null +++ b/examples/bdd/features/background_scenario.feature @@ -0,0 +1,10 @@ +Feature: Test scenario with a background + + Background: Init our scenario + Given I have empty step + + Scenario: The first scenario + Then I have another empty step + + Scenario: The second scenario + Then I have one more empty step diff --git a/examples/bdd/features/background_two_steps.feature b/examples/bdd/features/background_two_steps.feature new file mode 100644 index 0000000..87075ea --- /dev/null +++ b/examples/bdd/features/background_two_steps.feature @@ -0,0 +1,7 @@ +Feature: Test scenario with a background with two steps + Background: + Given I have first empty step + And I have second empty step + + Scenario: The scenario + Then I have main step diff --git a/examples/bdd/features/belly.feature b/examples/bdd/features/belly.feature new file mode 100644 index 0000000..6686183 --- /dev/null +++ b/examples/bdd/features/belly.feature @@ -0,0 +1,8 @@ +@smoke @test @feature:belly +Feature: Belly + + @ok @key:value + Scenario: a few cukes + Given I have 42 cukes in my belly + When I wait 1 hour + Then my belly should growl diff --git a/examples/bdd/features/custom_test_case_id.feature b/examples/bdd/features/custom_test_case_id.feature new file mode 100644 index 0000000..9d6618d --- /dev/null +++ b/examples/bdd/features/custom_test_case_id.feature @@ -0,0 +1,5 @@ +Feature: Test dummy scenario + + @tc_id:my_tc_id + Scenario: The scenario + Given I have empty step diff --git a/examples/bdd/features/data_table_parameter.feature b/examples/bdd/features/data_table_parameter.feature new file mode 100644 index 0000000..3753571 --- /dev/null +++ b/examples/bdd/features/data_table_parameter.feature @@ -0,0 +1,6 @@ +Feature: A basic test with a Data Table parameter + + Scenario: Test with Data Table + Given a step with a data table: + | key | value | + | myKey | myValue | diff --git a/examples/bdd/features/doc_string_parameters.feature b/examples/bdd/features/doc_string_parameters.feature new file mode 100644 index 0000000..3216259 --- /dev/null +++ b/examples/bdd/features/doc_string_parameters.feature @@ -0,0 +1,8 @@ +Feature: Basic test with a docstring parameter + + Scenario: Test with a docstring parameter + Given I have a docstring parameter: + """ + My very long parameter + With some new lines + """ diff --git a/examples/bdd/features/dynamic_scenario_outline_names.feature b/examples/bdd/features/dynamic_scenario_outline_names.feature new file mode 100644 index 0000000..0f3ed07 --- /dev/null +++ 
b/examples/bdd/features/dynamic_scenario_outline_names.feature
@@ -0,0 +1,11 @@
+Feature: Dynamic scenario outline names
+
+  Scenario Outline: Test with the parameter <str>
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
diff --git a/examples/bdd/features/examples_tags.feature b/examples/bdd/features/examples_tags.feature
new file mode 100644
index 0000000..77b3707
--- /dev/null
+++ b/examples/bdd/features/examples_tags.feature
@@ -0,0 +1,13 @@
+Feature: Basic test with parameters
+
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    @test
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
+      | "third" | 12345678 |
diff --git a/examples/bdd/features/failed_scenario.feature b/examples/bdd/features/failed_scenario.feature
new file mode 100644
index 0000000..861fb44
--- /dev/null
+++ b/examples/bdd/features/failed_scenario.feature
@@ -0,0 +1,4 @@
+Feature: Test failed scenario
+
+  Scenario: The scenario
+    Given I have a failed step
diff --git a/examples/bdd/features/rule_description.feature b/examples/bdd/features/rule_description.feature
new file mode 100644
index 0000000..5d5ecdb
--- /dev/null
+++ b/examples/bdd/features/rule_description.feature
@@ -0,0 +1,7 @@
+Feature: Test rule keyword
+
+  Rule: The first rule
+    Description for the Rule
+
+    Scenario: The first scenario
+      Given I have empty step
diff --git a/examples/bdd/features/rule_keyword.feature b/examples/bdd/features/rule_keyword.feature
new file mode 100644
index 0000000..0b37315
--- /dev/null
+++ b/examples/bdd/features/rule_keyword.feature
@@ -0,0 +1,15 @@
+Feature: Test rule keyword
+
+  Rule: The first rule
+    Scenario: The first scenario
+      Given I have empty step
+      Then I have another empty step
+
+    Scenario: The second scenario
+      Given I have empty step
+      Then I have one more empty step
+
+  Rule: The second rule
+    Scenario: The third scenario
+      Given I have empty step
+      Then I have one more else empty step
diff --git a/examples/bdd/features/scenario_outline_background.feature b/examples/bdd/features/scenario_outline_background.feature
new file mode 100644
index 0000000..a1dddd3
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_background.feature
@@ -0,0 +1,14 @@
+Feature: Basic test with parameters and background
+
+  Background:
+    Given I have empty step in background
+
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
diff --git a/examples/bdd/features/scenario_outline_description.feature b/examples/bdd/features/scenario_outline_description.feature
new file mode 100644
index 0000000..371ef7c
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_description.feature
@@ -0,0 +1,13 @@
+Feature: Basic test with parameters and description
+
+  Scenario Outline: Test with different parameters
+    The description for the scenario outline
+
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
diff --git a/examples/bdd/features/scenario_outline_fail.feature b/examples/bdd/features/scenario_outline_fail.feature
new file mode 100644
index 0000000..049a70c
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_fail.feature
@@ -0,0 +1,12 @@
+Feature: Basic test with parameters which fails
+
+  Scenario Outline: Test with different parameters failing
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+    Then I fail
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
diff --git a/examples/bdd/features/scenario_outline_parameters.feature b/examples/bdd/features/scenario_outline_parameters.feature
new file mode 100644
index 0000000..6913cbe
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_parameters.feature
@@ -0,0 +1,12 @@
+Feature: Basic test with parameters
+
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
+      | "third" | 12345678 |
diff --git a/examples/bdd/features/scenario_outline_test_case_id.feature b/examples/bdd/features/scenario_outline_test_case_id.feature
new file mode 100644
index 0000000..b14a57e
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_test_case_id.feature
@@ -0,0 +1,11 @@
+Feature: Basic test with parameters
+
+  @tc_id:outline_tc_id
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
diff --git a/examples/bdd/step_defs/__init__.py b/examples/bdd/step_defs/__init__.py
new file mode 100644
index 0000000..8175387
--- /dev/null
+++ b/examples/bdd/step_defs/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2025 EPAM Systems
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/examples/bdd/step_defs/custom_test_case_id_steps.py b/examples/bdd/step_defs/custom_test_case_id_steps.py
new file mode 100644
index 0000000..6528831
--- /dev/null
+++ b/examples/bdd/step_defs/custom_test_case_id_steps.py
@@ -0,0 +1,27 @@
+# Copyright 2025 EPAM Systems
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import logging + +from pytest_bdd import given, scenarios + +# Import the scenario from the feature file +scenarios("../features/custom_test_case_id.feature") + +LOGGER = logging.getLogger(__name__) + + +@given("I have empty step") +def step_with_custom_test_case_id(): + LOGGER.info("I have empty step") diff --git a/examples/bdd/step_defs/data_table_parameter_steps.py b/examples/bdd/step_defs/data_table_parameter_steps.py new file mode 100644 index 0000000..be4aceb --- /dev/null +++ b/examples/bdd/step_defs/data_table_parameter_steps.py @@ -0,0 +1,33 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Dict + +from pytest_bdd import given, scenarios + +# Import the scenario from the feature file +scenarios("../features/data_table_parameter.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("a step with a data table:") +def step_with_data_table(datatable: Dict[str, str]) -> None: + """Step that receives a data table and logs its content. + + :param datatable: Data table from the feature file + """ + LOGGER.info("Data table content: %s", datatable) diff --git a/examples/bdd/step_defs/doc_string_parameters_steps.py b/examples/bdd/step_defs/doc_string_parameters_steps.py new file mode 100644 index 0000000..a319ab9 --- /dev/null +++ b/examples/bdd/step_defs/doc_string_parameters_steps.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, scenarios + +# Import the scenario from the feature file +scenarios("../features/doc_string_parameters.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("I have a docstring parameter:") +def step_with_docstring(docstring: str) -> None: + """Step that receives a docstring and logs its content. + + :param docstring: Multi-line docstring from the feature file + """ + LOGGER.info("Docstring content: %s", docstring) diff --git a/examples/bdd/step_defs/example_tags_steps.py b/examples/bdd/step_defs/example_tags_steps.py new file mode 100644 index 0000000..9eef907 --- /dev/null +++ b/examples/bdd/step_defs/example_tags_steps.py @@ -0,0 +1,38 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/examples_tags.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_background_steps.py b/examples/bdd/step_defs/scenario_outline_background_steps.py new file mode 100644 index 0000000..ee75256 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_background_steps.py @@ -0,0 +1,44 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_background.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("I have empty step in background") +def empty_step(): + """Empty step implementation.""" + pass + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_description_steps.py b/examples/bdd/step_defs/scenario_outline_description_steps.py new file mode 100644 index 0000000..4ff1765 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_description_steps.py @@ -0,0 +1,38 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_description.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_fail_steps.py b/examples/bdd/step_defs/scenario_outline_fail_steps.py new file mode 100644 index 0000000..ebca0a3 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_fail_steps.py @@ -0,0 +1,43 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_fail.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) + + +@then("I fail") +def fail_step(): + raise AssertionError("This step always fails") diff --git a/examples/bdd/step_defs/scenario_outline_name_steps.py b/examples/bdd/step_defs/scenario_outline_name_steps.py new file mode 100644 index 0000000..75b6d69 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_name_steps.py @@ -0,0 +1,38 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/dynamic_scenario_outline_names.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_parameters_steps.py b/examples/bdd/step_defs/scenario_outline_parameters_steps.py new file mode 100644 index 0000000..ece05e1 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_parameters_steps.py @@ -0,0 +1,38 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_parameters.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_test_case_id_steps.py b/examples/bdd/step_defs/scenario_outline_test_case_id_steps.py new file mode 100644 index 0000000..d5174aa --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_test_case_id_steps.py @@ -0,0 +1,37 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_test_case_id.feature") + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/test_arguments.py b/examples/bdd/step_defs/test_arguments.py new file mode 100644 index 0000000..97e27fb --- /dev/null +++ b/examples/bdd/step_defs/test_arguments.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/arguments_four_steps.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left diff --git a/examples/bdd/step_defs/test_arguments_description.py b/examples/bdd/step_defs/test_arguments_description.py new file mode 100644 index 0000000..d1301b7 --- /dev/null +++ b/examples/bdd/step_defs/test_arguments_description.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/arguments_four_steps_description.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left diff --git a/examples/bdd/step_defs/test_arguments_two_features.py b/examples/bdd/step_defs/test_arguments_two_features.py new file mode 100644 index 0000000..0f268c5 --- /dev/null +++ b/examples/bdd/step_defs/test_arguments_two_features.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/arguments_four_steps.feature", "../features/arguments_three_steps.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left diff --git a/examples/bdd/step_defs/test_arguments_two_scenarios.py b/examples/bdd/step_defs/test_arguments_two_scenarios.py new file mode 100644 index 0000000..7e7db79 --- /dev/null +++ b/examples/bdd/step_defs/test_arguments_two_scenarios.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/arguments_two_scenarios.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left diff --git a/examples/bdd/step_defs/test_background.py b/examples/bdd/step_defs/test_background.py new file mode 100644 index 0000000..fff783d --- /dev/null +++ b/examples/bdd/step_defs/test_background.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import given, scenarios, then + +scenarios("../features/background_scenario.feature") + + +@given("I have empty step") +def empty_step(): + pass + + +@then("I have another empty step") +def another_empty_step(): + pass + + +@then("I have one more empty step") +def one_more_empty_step(): + pass diff --git a/examples/bdd/step_defs/test_background_two_steps.py b/examples/bdd/step_defs/test_background_two_steps.py new file mode 100644 index 0000000..ad81b76 --- /dev/null +++ b/examples/bdd/step_defs/test_background_two_steps.py @@ -0,0 +1,35 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import given, scenarios, then + +scenarios("../features/background_two_steps.feature") + + +@given("I have first empty step") +def first_empty_step(): + """First empty step implementation.""" + pass + + +@given("I have second empty step") +def second_empty_step(): + """Second empty step implementation.""" + pass + + +@then("I have main step") +def main_step(): + """Main step implementation.""" + pass diff --git a/examples/bdd/step_defs/test_belly.py b/examples/bdd/step_defs/test_belly.py new file mode 100644 index 0000000..5afcd97 --- /dev/null +++ b/examples/bdd/step_defs/test_belly.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/belly.feature") + + +@given(parsers.parse("I have {start:d} cukes in my belly"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "wait": 0} + + +@when(parsers.parse("I wait {hours:d} hour")) +def then_wait(cucumbers, hours): + cucumbers["wait"] += hours + + +@then("my belly should growl") +def assert_growl(cucumbers): + assert cucumbers["start"] == cucumbers["wait"] * 42 diff --git a/examples/bdd/step_defs/test_failed_step.py b/examples/bdd/step_defs/test_failed_step.py new file mode 100644 index 0000000..e49b854 --- /dev/null +++ b/examples/bdd/step_defs/test_failed_step.py @@ -0,0 +1,22 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import given, scenarios + +scenarios("../features/failed_scenario.feature") + + +@given("I have a failed step") +def failed_step(): + assert False diff --git a/examples/bdd/step_defs/test_rule_description_steps.py b/examples/bdd/step_defs/test_rule_description_steps.py new file mode 100644 index 0000000..60beafa --- /dev/null +++ b/examples/bdd/step_defs/test_rule_description_steps.py @@ -0,0 +1,25 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rule keyword test module.""" + +from pytest_bdd import given, scenarios + +scenarios("../features/rule_description.feature") + + +@given("I have empty step") +def empty_step(): + """Empty step implementation.""" + pass diff --git a/examples/bdd/step_defs/test_rule_steps.py b/examples/bdd/step_defs/test_rule_steps.py new file mode 100644 index 0000000..e981126 --- /dev/null +++ b/examples/bdd/step_defs/test_rule_steps.py @@ -0,0 +1,42 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rule keyword test module.""" +from pytest_bdd import given, scenarios, then + +scenarios("../features/rule_keyword.feature") + + +@given("I have empty step") +def empty_step(): + """Empty step implementation.""" + pass + + +@then("I have another empty step") +def another_empty_step(): + """Another empty step implementation.""" + pass + + +@then("I have one more empty step") +def one_more_empty_step(): + """One more empty step implementation.""" + pass + + +@then("I have one more else empty step") +def one_more_else_empty_step(): + """One more else empty step implementation.""" + pass diff --git a/examples/custom_name/test_custom_name_args.py b/examples/custom_name/test_custom_name_args.py index cf2386f..a20c3c6 100644 --- a/examples/custom_name/test_custom_name_args.py +++ b/examples/custom_name/test_custom_name_args.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -TEST_NAME_ARGS = 'Test name by mark' +TEST_NAME_ARGS = "Test name by mark" @pytest.mark.name(TEST_NAME_ARGS) diff --git a/examples/custom_name/test_custom_name_empty.py b/examples/custom_name/test_custom_name_empty.py index 50373fe..57ac526 100644 --- a/examples/custom_name/test_custom_name_empty.py +++ b/examples/custom_name/test_custom_name_empty.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -TEST_NAME_EMPTY = 'examples/custom_name/test_custom_name_empty.py::test_name_by_mark_empty' +TEST_NAME_EMPTY = "examples/custom_name/test_custom_name_empty.py::test_name_by_mark_empty" @pytest.mark.name() diff --git a/examples/custom_name/test_custom_name_kwargs.py b/examples/custom_name/test_custom_name_kwargs.py index c4bce53..997fdb9 100644 --- a/examples/custom_name/test_custom_name_kwargs.py +++ b/examples/custom_name/test_custom_name_kwargs.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -TEST_NAME_KWARGS = 'Test name by mark, kwargs' +TEST_NAME_KWARGS = "Test name by mark, kwargs" @pytest.mark.name(name=TEST_NAME_KWARGS) diff --git a/examples/fixtures/class_fixture_return/conftest.py b/examples/fixtures/class_fixture_return/conftest.py index da43a33..33e6d2b 100644 --- a/examples/fixtures/class_fixture_return/conftest.py +++ b/examples/fixtures/class_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def class_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/class_fixture_return/test_fixture_class_setup.py b/examples/fixtures/class_fixture_return/test_fixture_class_setup.py index 78881af..5900957 100644 --- a/examples/fixtures/class_fixture_return/test_fixture_class_setup.py +++ b/examples/fixtures/class_fixture_return/test_fixture_class_setup.py @@ -24,6 +24,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ class TestClassOne: def test_fixture_class_setup_first(self, class_fixture_return_config): assert class_fixture_return_config is not None diff --git a/examples/fixtures/module_fixture_return/conftest.py b/examples/fixtures/module_fixture_return/conftest.py index 3336e6a..8d72be3 100644 --- a/examples/fixtures/module_fixture_return/conftest.py +++ b/examples/fixtures/module_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def module_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/module_fixture_return/test_fixture_module_setup.py b/examples/fixtures/module_fixture_return/test_fixture_module_setup.py index b11fe6d..cfac0db 100644 --- a/examples/fixtures/module_fixture_return/test_fixture_module_setup.py +++ b/examples/fixtures/module_fixture_return/test_fixture_module_setup.py @@ -24,6 +24,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_fixture_module_setup_first(module_fixture_return_config): assert module_fixture_return_config is not None diff --git a/examples/fixtures/package_fixture_return/conftest.py b/examples/fixtures/package_fixture_return/conftest.py index f406944..a9bb436 100644 --- a/examples/fixtures/package_fixture_return/conftest.py +++ b/examples/fixtures/package_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='package') +@pytest.fixture(scope="package") def package_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py b/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py index 4c42833..1037354 100644 --- a/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py +++ b/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py @@ -24,5 +24,6 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_fixture_package_setup_first(package_fixture_return_config): assert package_fixture_return_config is not None diff --git a/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py b/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py index 7ba2f2a..f66e738 100644 --- a/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py +++ b/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py @@ -24,5 +24,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ def test_fixture_package_setup_second(package_fixture_return_config): assert package_fixture_return_config is not None diff --git a/examples/fixtures/session_fixture_return/conftest.py b/examples/fixtures/session_fixture_return/conftest.py index 9ab2017..fd08838 100644 --- a/examples/fixtures/session_fixture_return/conftest.py +++ b/examples/fixtures/session_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def session_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/test_failure_fixture_teardown/conftest.py b/examples/fixtures/test_failure_fixture_teardown/conftest.py index 3100fac..e3df987 100644 --- a/examples/fixtures/test_failure_fixture_teardown/conftest.py +++ b/examples/fixtures/test_failure_fixture_teardown/conftest.py @@ -22,8 +22,8 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_BEFORE_YIELD = 'Log message before yield and test failure' -LOG_MESSAGE_TEARDOWN = 'Log message for teardown after test failure' +LOG_MESSAGE_BEFORE_YIELD = "Log message before yield and test failure" +LOG_MESSAGE_TEARDOWN = "Log message for teardown after test failure" @pytest.fixture diff --git a/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py b/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py index 31f6884..390feaa 100644 --- a/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py +++ b/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py @@ -24,5 +24,6 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_failure_fixture_teardown(test_failure_fixture_teardown_config): assert test_failure_fixture_teardown_config is None diff --git a/examples/fixtures/test_fixture_exit/test_fixture_exit.py b/examples/fixtures/test_fixture_exit/test_fixture_exit.py new file mode 100644 index 0000000..a7a0208 --- /dev/null +++ b/examples/fixtures/test_fixture_exit/test_fixture_exit.py @@ -0,0 +1,24 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + + +@pytest.fixture +def fixture_demo(): + pytest.exit("Some Message") + + +def test_exit(fixture_demo): + assert True diff --git a/examples/fixtures/test_fixture_return_none/conftest.py b/examples/fixtures/test_fixture_return_none/conftest.py index a81a879..a320912 100644 --- a/examples/fixtures/test_fixture_return_none/conftest.py +++ b/examples/fixtures/test_fixture_return_none/conftest.py @@ -21,7 +21,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup and return None' +LOG_MESSAGE_SETUP = "Log message for setup and return None" @pytest.fixture diff --git a/examples/fixtures/test_fixture_setup/conftest.py b/examples/fixtures/test_fixture_setup/conftest.py index 36897fa..7d74f8e 100644 --- a/examples/fixtures/test_fixture_setup/conftest.py +++ b/examples/fixtures/test_fixture_setup/conftest.py @@ -22,7 +22,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup' +LOG_MESSAGE_SETUP = "Log message for setup" @pytest.fixture diff --git a/examples/fixtures/test_fixture_setup_failure/conftest.py b/examples/fixtures/test_fixture_setup_failure/conftest.py index a6dfce2..5995d54 100644 --- a/examples/fixtures/test_fixture_setup_failure/conftest.py +++ b/examples/fixtures/test_fixture_setup_failure/conftest.py @@ -21,10 +21,10 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup failure' +LOG_MESSAGE_SETUP = "Log message for setup failure" @pytest.fixture def test_fixture_setup_failure_config(): logging.error(LOG_MESSAGE_SETUP) - raise Exception('Fixture setup failed') + raise Exception("Fixture setup failed") diff --git a/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py b/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py index 76c53b0..db4fe95 100644 --- a/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py +++ b/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py @@ -32,7 +32,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_TEST = 'Log message for test of setup failure' +LOG_MESSAGE_TEST = "Log message for test of setup failure" def test_fixture_setup_failure(test_fixture_setup_failure_config): diff --git a/examples/fixtures/test_fixture_teardown/conftest.py b/examples/fixtures/test_fixture_teardown/conftest.py index 29b3e70..10d3d6a 100644 --- a/examples/fixtures/test_fixture_teardown/conftest.py +++ b/examples/fixtures/test_fixture_teardown/conftest.py @@ -22,8 +22,8 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_BEFORE_YIELD = 'Log message before yield' -LOG_MESSAGE_TEARDOWN = 'Log message for teardown' +LOG_MESSAGE_BEFORE_YIELD = "Log message before yield" +LOG_MESSAGE_TEARDOWN = "Log message for teardown" @pytest.fixture diff --git a/examples/fixtures/test_fixture_teardown_failure/conftest.py b/examples/fixtures/test_fixture_teardown_failure/conftest.py index 3315a86..17c5e21 100644 --- a/examples/fixtures/test_fixture_teardown_failure/conftest.py +++ b/examples/fixtures/test_fixture_teardown_failure/conftest.py @@ -22,8 +22,8 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_BEFORE_YIELD = 'Log message before yield and failure' -LOG_MESSAGE_TEARDOWN = 'Log message for failure teardown' +LOG_MESSAGE_BEFORE_YIELD = "Log message before yield and failure" +LOG_MESSAGE_TEARDOWN = "Log message for failure 
teardown" @pytest.fixture @@ -31,4 +31,4 @@ def test_fixture_teardown_failure_config(): logging.error(LOG_MESSAGE_BEFORE_YIELD) yield mock.Mock() logging.error(LOG_MESSAGE_TEARDOWN) - raise Exception('Fixture teardown failed') + raise Exception("Fixture teardown failed") diff --git a/examples/fixtures/test_fixture_yield_none/conftest.py b/examples/fixtures/test_fixture_yield_none/conftest.py index 28839ec..e35b52a 100644 --- a/examples/fixtures/test_fixture_yield_none/conftest.py +++ b/examples/fixtures/test_fixture_yield_none/conftest.py @@ -21,7 +21,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup and yield None' +LOG_MESSAGE_SETUP = "Log message for setup and yield None" @pytest.fixture diff --git a/examples/params/test_binary_symbol_in_parameters.py b/examples/params/test_binary_symbol_in_parameters.py index 6ae5cac..241a1f6 100644 --- a/examples/params/test_binary_symbol_in_parameters.py +++ b/examples/params/test_binary_symbol_in_parameters.py @@ -1,16 +1,14 @@ """A simple example test with different parameter types.""" -import pytest +import pytest -BINARY_TEXT = 'Some text with binary symbol \0' +BINARY_TEXT = "Some text with binary symbol \0" -@pytest.mark.parametrize( - ['text'], [[BINARY_TEXT]] -) +@pytest.mark.parametrize(["text"], [[BINARY_TEXT]]) def test_in_class_parameterized(text): """ This is my test with different parameter types. """ assert text == BINARY_TEXT - assert text != BINARY_TEXT.replace('\0', '\\0') + assert text != BINARY_TEXT.replace("\0", "\\0") diff --git a/examples/params/test_different_parameter_types.py b/examples/params/test_different_parameter_types.py index c85b9de..d219884 100644 --- a/examples/params/test_different_parameter_types.py +++ b/examples/params/test_different_parameter_types.py @@ -1,10 +1,9 @@ """A simple example test with different parameter types.""" + import pytest -@pytest.mark.parametrize( - ['integer', 'floating_point', 'boolean', 'none'], [(1, 1.5, True, None)] -) +@pytest.mark.parametrize(["integer", "floating_point", "boolean", "none"], [(1, 1.5, True, None)]) def test_in_class_parameterized(integer, floating_point, boolean, none): """ This is my test with different parameter types. diff --git a/examples/params/test_in_class_parameterized.py b/examples/params/test_in_class_parameterized.py index cc58c1d..8c3403c 100644 --- a/examples/params/test_in_class_parameterized.py +++ b/examples/params/test_in_class_parameterized.py @@ -1,10 +1,11 @@ """A simple example test in a class with a parameter.""" + import pytest class Tests: - @pytest.mark.parametrize('param', ['param']) + @pytest.mark.parametrize("param", ["param"]) def test_in_class_parameterized(self, param): """ This is my test inside `Tests` class with a parameter diff --git a/examples/skip/test_simple_skip.py b/examples/skip/test_simple_skip.py index 64afdb1..69ce838 100644 --- a/examples/skip/test_simple_skip.py +++ b/examples/skip/test_simple_skip.py @@ -1,4 +1,5 @@ """Simple example skipped test.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +15,6 @@ import pytest -@pytest.mark.skip(reason='no way of currently testing this') +@pytest.mark.skip(reason="no way of currently testing this") def test_simple_skip(): assert False diff --git a/examples/skip/test_skip_issue.py b/examples/skip/test_skip_issue.py index c510a87..3674616 100644 --- a/examples/skip/test_skip_issue.py +++ b/examples/skip/test_skip_issue.py @@ -1,4 +1,5 @@ """Simple example skipped test.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,12 +14,12 @@ # limitations under the License import pytest -ID = 'ABC-1234' -REASON = 'some_bug' -TYPE = 'PB' +ID = "ABC-1234" +REASON = "some_bug" +TYPE = "PB" @pytest.mark.issue(issue_id=ID, reason=REASON, issue_type=TYPE) -@pytest.mark.skip(reason='no way of currently testing this') +@pytest.mark.skip(reason="no way of currently testing this") def test_simple_skip(): assert False diff --git a/examples/test_case_id/test_case_id_decorator.py b/examples/test_case_id/test_case_id_decorator.py index c826c7c..9bbda94 100644 --- a/examples/test_case_id/test_case_id_decorator.py +++ b/examples/test_case_id/test_case_id_decorator.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/test_case_id/test_case_id_decorator_no_id.py b/examples/test_case_id/test_case_id_decorator_no_id.py index 580f97d..5d59a68 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id.py +++ b/examples/test_case_id/test_case_id_decorator_no_id.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_params_false.py b/examples/test_case_id/test_case_id_decorator_no_id_params_false.py index dda3126..005046a 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_params_false.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_params_false.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
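The `test_skip_issue.py` example combines the plugin's `issue` marker with a regular skip, so the skipped result carries defect information. A condensed sketch with illustrative values; the id is only useful if it exists in the bug-tracking system referenced by `rp_bts_issue_url`:

```python
import pytest

ISSUE_ID = "EXAMPLE-42"  # illustrative id, not a real ticket


@pytest.mark.issue(issue_id=ISSUE_ID, reason="known defect", issue_type="PB")
@pytest.mark.skip(reason="blocked by a known defect")
def test_blocked_by_known_defect():
    assert False
```

With `rp_issue_id_marks` left at its default (`True`, per the option help further down in this diff), the issue id is additionally attached to the test as a tag.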
@@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(parameterized=False) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_params_true.py b/examples/test_case_id/test_case_id_decorator_no_id_params_true.py index c0ada01..5ff41b2 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_params_true.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_params_true.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(parameterized=True) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py b/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py index 708fb7a..2a980bd 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) -@pytest.mark.tc_id(params=['param2']) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) +@pytest.mark.tc_id(params=["param2"]) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py b/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py index a7953a7..2222af6 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) -@pytest.mark.tc_id(parameterized=True, params=['param2']) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) +@pytest.mark.tc_id(parameterized=True, params=["param2"]) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_false.py b/examples/test_case_id/test_case_id_decorator_params_false.py index 9c273af..0830783 100644 --- a/examples/test_case_id/test_case_id_decorator_params_false.py +++ b/examples/test_case_id/test_case_id_decorator_params_false.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . 
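The surrounding `test_case_id_decorator*` examples enumerate the `tc_id` marker's argument combinations. Per the marker help registered later in this diff, `parameterized` controls whether parameter values contribute to the reported Test Case ID and `params` narrows that to specific parameter names; the no-id variants leave the base id to the agent. A condensed sketch with an illustrative explicit id:

```python
import pytest


@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")])
@pytest.mark.tc_id("EXAMPLE-100", parameterized=True, params=["param2"])
def test_with_explicit_case_id(param1, param2):
    # Only param2's value is used for Test Case ID generation here;
    # omitting params would use all parameter values instead.
    assert True
```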
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-321" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(TEST_CASE_ID, parameterized=False) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_no.py b/examples/test_case_id/test_case_id_decorator_params_no.py index 0fa5306..00cf143 100644 --- a/examples/test_case_id/test_case_id_decorator_params_no.py +++ b/examples/test_case_id/test_case_id_decorator_params_no.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-132" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(TEST_CASE_ID) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_partially.py b/examples/test_case_id/test_case_id_decorator_params_partially.py index ec998a2..88995fa 100644 --- a/examples/test_case_id/test_case_id_decorator_params_partially.py +++ b/examples/test_case_id/test_case_id_decorator_params_partially.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-213" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) -@pytest.mark.tc_id(TEST_CASE_ID, parameterized=True, params=['param1']) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) +@pytest.mark.tc_id(TEST_CASE_ID, parameterized=True, params=["param1"]) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_true.py b/examples/test_case_id/test_case_id_decorator_params_true.py index d04283f..bb3db5a 100644 --- a/examples/test_case_id/test_case_id_decorator_params_true.py +++ b/examples/test_case_id/test_case_id_decorator_params_true.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-231" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(TEST_CASE_ID, parameterized=True) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_issue_id.py b/examples/test_issue_id.py index 46eadaa..38abcf1 100644 --- a/examples/test_issue_id.py +++ b/examples/test_issue_id.py @@ -13,9 +13,9 @@ import pytest -ID = 'ABC-1234' -REASON = 'some_bug' -TYPE = 'PB' +ID = "ABC-1234" +REASON = "some_bug" +TYPE = "PB" @pytest.mark.issue(issue_id=ID, reason=REASON, issue_type=TYPE) diff --git a/examples/test_issue_id_pass.py b/examples/test_issue_id_pass.py index 889deea..d4347d8 100644 --- a/examples/test_issue_id_pass.py +++ b/examples/test_issue_id_pass.py @@ -13,9 +13,9 @@ import pytest -ID = 'ABC-1234' -REASON = 'some_bug' -TYPE = 'PB' +ID = "ABC-1234" +REASON = "some_bug" +TYPE = "PB" @pytest.mark.issue(issue_id=ID, reason=REASON, issue_type=TYPE) diff --git a/examples/test_max_item_name.py b/examples/test_max_item_name.py index e96d684..4e929a2 100644 --- a/examples/test_max_item_name.py +++ b/examples/test_max_item_name.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_thi_is_simple_example_test_with_the_name_longer_than_maximum_allowed_lorem_ipsum_dolor_sit_amet_consectetur_adipiscing_elit_sed_do_eiusmod_tempor_incididunt_ut_labore_et_dolore_magna_aliqua_ut_enim_ad_minim_veniam_quis_nostrud_exercitation_ullamco_laboris_nisi_ut_aliquip_ex_ea_commodo_consequat_duis_aute_irure_dolor_in_reprehenderit_in_voluptate_velit_esse_cillum_dolore_eu_fugiat_nulla_pariatur_excepteur_sint_occaecat_cupidatat_non_proident_sunt_in_culpa_qui_officia_deserunt_mollit_anim_id_est_laborum_sed_ut_perspiciatis_unde_omnis_iste_natus_error_sit_voluptatem_accusantium_doloremque_laudantium_totam_rem_aperiam_eaque_ipsa_quae_ab_illo_inventore_veritatis_et_quasi_architecto_beatae_vitae_dicta_sunt_explicabo_nemo_enim_ipsam_voluptatem_quia_voluptas_sit_aspernatur_aut_odit_aut_fugit_sed_quia_consequuntur_magni_dolores_eos_qui_ratione_voluptatem_sequi_nesciunt_neque_porro_quisquam_est_qui_dolorem_ipsum_quia_dolor_sit_amet_consectetur_adipisci_velit_sed_quia_non_numquam_eius_modi_tempora_incidunt_ut_labore_et_dolore_magnam_aliquam_quaerat_voluptatem(): # noqa: E501 """Simple example test with the name longer than maximum allowed.""" assert True diff --git a/examples/test_simple.py b/examples/test_simple.py index c63f45d..34f2441 100644 --- a/examples/test_simple.py +++ b/examples/test_simple.py @@ -1,4 +1,5 @@ """Simple example test.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/test_simple_fail.py b/examples/test_simple_fail.py index 6fffad6..91094d5 100644 --- a/examples/test_simple_fail.py +++ b/examples/test_simple_fail.py @@ -1,4 +1,5 @@ """Simple example test which fails.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
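`test_max_item_name.py` deliberately exceeds the maximum item name length; the related constants `MAX_ITEM_NAME_LENGTH = 1024` and `TRUNCATION_STR = "..."` appear in `service.py` near the end of this diff. The sketch below shows one plausible truncation that keeps the head of the name; which end the agent actually keeps is not visible in this excerpt:

```python
MAX_ITEM_NAME_LENGTH = 1024
TRUNCATION_STR = "..."


def truncate_name(name: str) -> str:
    # Keep the name within the limit and make the truncation visible.
    if len(name) <= MAX_ITEM_NAME_LENGTH:
        return name
    return name[: MAX_ITEM_NAME_LENGTH - len(TRUNCATION_STR)] + TRUNCATION_STR


assert len(truncate_name("x" * 5000)) == MAX_ITEM_NAME_LENGTH
assert truncate_name("short name") == "short name"
```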
diff --git a/pyproject.toml b/pyproject.toml index ced8293..a81c860 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,3 +6,13 @@ requires = [ "wheel==0.40.0", ] build-backend = "setuptools.build_meta" + +[tool.isort] +py_version=310 +line_length = 119 +profile = "black" +skip_gitignore = true + +[tool.black] +line-length = 119 +target-version = ["py310"] diff --git a/pytest_reportportal/__init__.py b/pytest_reportportal/__init__.py index 36b85bb..83db8ba 100644 --- a/pytest_reportportal/__init__.py +++ b/pytest_reportportal/__init__.py @@ -13,6 +13,6 @@ """This package contains Pytest agent's code for the Report Portal.""" -__all__ = ['LAUNCH_WAIT_TIMEOUT'] +__all__ = ["LAUNCH_WAIT_TIMEOUT"] LAUNCH_WAIT_TIMEOUT = 10 diff --git a/pytest_reportportal/config.py b/pytest_reportportal/config.py index 0b20d34..092fe87 100644 --- a/pytest_reportportal/config.py +++ b/pytest_reportportal/config.py @@ -15,10 +15,10 @@ import warnings from os import getenv -from typing import Optional, Union, Any, Tuple +from typing import Any, List, Optional, Tuple, Union from _pytest.config import Config -from reportportal_client import OutputType, ClientType +from reportportal_client import ClientType, OutputType from reportportal_client.helpers import to_bool from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE @@ -26,8 +26,7 @@ # This try/except can go away once we support pytest >= 5.4.0 from _pytest.logging import get_actual_log_level except ImportError: - from _pytest.logging import get_log_level_for_setting as \ - get_actual_log_level + from _pytest.logging import get_log_level_for_setting as get_actual_log_level class AgentConfig: @@ -50,7 +49,8 @@ class AgentConfig: rp_bts_url: str rp_launch: str rp_launch_id: Optional[str] - rp_launch_attributes: Optional[list] + rp_launch_attributes: Optional[List[str]] + rp_tests_attributes: Optional[List[str]] rp_launch_description: str rp_log_batch_size: int rp_log_batch_payload_size: int @@ -72,107 +72,106 @@ class AgentConfig: def __init__(self, pytest_config: Config) -> None: """Initialize required attributes.""" - self.rp_rerun = (pytest_config.option.rp_rerun or pytest_config.getini('rp_rerun')) - self.rp_endpoint = self.find_option(pytest_config, 'rp_endpoint') - self.rp_hierarchy_code = to_bool(self.find_option(pytest_config, 'rp_hierarchy_code')) - self.rp_dir_level = int(self.find_option(pytest_config, 'rp_hierarchy_dirs_level')) - self.rp_hierarchy_dirs = to_bool(self.find_option(pytest_config, 'rp_hierarchy_dirs')) - self.rp_dir_path_separator = self.find_option(pytest_config, 'rp_hierarchy_dir_path_separator') - self.rp_hierarchy_test_file = to_bool(self.find_option(pytest_config, 'rp_hierarchy_test_file')) - self.rp_ignore_attributes = set(self.find_option(pytest_config, 'rp_ignore_attributes') or []) - self.rp_is_skipped_an_issue = self.find_option(pytest_config, 'rp_is_skipped_an_issue') - self.rp_issue_id_marks = self.find_option(pytest_config, 'rp_issue_id_marks') - self.rp_bts_issue_url = self.find_option(pytest_config, 'rp_bts_issue_url') + self.rp_rerun = pytest_config.option.rp_rerun or pytest_config.getini("rp_rerun") + self.rp_endpoint = self.find_option(pytest_config, "rp_endpoint") + self.rp_hierarchy_code = to_bool(self.find_option(pytest_config, "rp_hierarchy_code")) + self.rp_dir_level = int(self.find_option(pytest_config, "rp_hierarchy_dirs_level")) + self.rp_hierarchy_dirs = to_bool(self.find_option(pytest_config, "rp_hierarchy_dirs")) + self.rp_dir_path_separator = self.find_option(pytest_config, 
"rp_hierarchy_dir_path_separator") + self.rp_hierarchy_test_file = to_bool(self.find_option(pytest_config, "rp_hierarchy_test_file")) + self.rp_ignore_attributes = set(self.find_option(pytest_config, "rp_ignore_attributes") or []) + self.rp_is_skipped_an_issue = self.find_option(pytest_config, "rp_is_skipped_an_issue") + self.rp_issue_id_marks = self.find_option(pytest_config, "rp_issue_id_marks") + self.rp_bts_issue_url = self.find_option(pytest_config, "rp_bts_issue_url") if not self.rp_bts_issue_url: - self.rp_bts_issue_url = self.find_option(pytest_config, 'rp_issue_system_url') + self.rp_bts_issue_url = self.find_option(pytest_config, "rp_issue_system_url") if self.rp_bts_issue_url: warnings.warn( - 'Parameter `rp_issue_system_url` is deprecated since 5.4.0 and will be subject for removing' - 'in the next major version. Use `rp_bts_issue_url` argument instead.', + "Parameter `rp_issue_system_url` is deprecated since 5.4.0 and will be subject for removing" + "in the next major version. Use `rp_bts_issue_url` argument instead.", DeprecationWarning, - 2 + 2, ) - self.rp_bts_project = self.find_option(pytest_config, 'rp_bts_project') - self.rp_bts_url = self.find_option(pytest_config, 'rp_bts_url') - self.rp_launch = self.find_option(pytest_config, 'rp_launch') - self.rp_launch_id = self.find_option(pytest_config, 'rp_launch_id') - self.rp_launch_attributes = self.find_option(pytest_config, 'rp_launch_attributes') - self.rp_launch_description = self.find_option(pytest_config, 'rp_launch_description') - self.rp_log_batch_size = int(self.find_option(pytest_config, 'rp_log_batch_size')) - batch_payload_size = self.find_option(pytest_config, 'rp_log_batch_payload_size') + self.rp_bts_project = self.find_option(pytest_config, "rp_bts_project") + self.rp_bts_url = self.find_option(pytest_config, "rp_bts_url") + self.rp_launch = self.find_option(pytest_config, "rp_launch") + self.rp_launch_id = self.find_option(pytest_config, "rp_launch_id") + self.rp_launch_attributes = self.find_option(pytest_config, "rp_launch_attributes") + self.rp_tests_attributes = self.find_option(pytest_config, "rp_tests_attributes") + self.rp_launch_description = self.find_option(pytest_config, "rp_launch_description") + self.rp_log_batch_size = int(self.find_option(pytest_config, "rp_log_batch_size")) + batch_payload_size = self.find_option(pytest_config, "rp_log_batch_payload_size") if batch_payload_size: self.rp_log_batch_payload_size = int(batch_payload_size) else: self.rp_log_batch_payload_size = MAX_LOG_BATCH_PAYLOAD_SIZE - self.rp_log_level = get_actual_log_level(pytest_config, 'rp_log_level') - self.rp_log_format = self.find_option(pytest_config, 'rp_log_format') - self.rp_thread_logging = to_bool(self.find_option(pytest_config, 'rp_thread_logging') or False) - self.rp_mode = self.find_option(pytest_config, 'rp_mode') - self.rp_parent_item_id = self.find_option(pytest_config, 'rp_parent_item_id') - self.rp_project = self.find_option(pytest_config, 'rp_project') - self.rp_rerun_of = self.find_option(pytest_config, 'rp_rerun_of') - self.rp_skip_connection_test = to_bool(self.find_option(pytest_config, 'rp_skip_connection_test')) - - rp_api_retries_str = self.find_option(pytest_config, 'rp_api_retries') + self.rp_log_level = get_actual_log_level(pytest_config, "rp_log_level") + self.rp_log_format = self.find_option(pytest_config, "rp_log_format") + self.rp_thread_logging = to_bool(self.find_option(pytest_config, "rp_thread_logging") or False) + self.rp_mode = self.find_option(pytest_config, "rp_mode") + 
self.rp_parent_item_id = self.find_option(pytest_config, "rp_parent_item_id") + self.rp_project = self.find_option(pytest_config, "rp_project") + self.rp_rerun_of = self.find_option(pytest_config, "rp_rerun_of") + self.rp_skip_connection_test = to_bool(self.find_option(pytest_config, "rp_skip_connection_test")) + + rp_api_retries_str = self.find_option(pytest_config, "rp_api_retries") rp_api_retries = rp_api_retries_str and int(rp_api_retries_str) if rp_api_retries and rp_api_retries > 0: self.rp_api_retries = rp_api_retries else: - rp_api_retries_str = self.find_option(pytest_config, 'retries') + rp_api_retries_str = self.find_option(pytest_config, "retries") rp_api_retries = rp_api_retries_str and int(rp_api_retries_str) if rp_api_retries and rp_api_retries > 0: self.rp_api_retries = rp_api_retries warnings.warn( - 'Parameter `retries` is deprecated since 5.1.9 ' - 'and will be subject for removing in the next ' - 'major version. Use `rp_api_retries` argument ' - 'instead.', + "Parameter `retries` is deprecated since 5.1.9 " + "and will be subject for removing in the next " + "major version. Use `rp_api_retries` argument " + "instead.", DeprecationWarning, - 2 + 2, ) else: self.rp_api_retries = 0 - self.rp_api_key = getenv( - 'RP_API_KEY') or self.find_option(pytest_config, 'rp_api_key') + self.rp_api_key = getenv("RP_API_KEY") or self.find_option(pytest_config, "rp_api_key") if not self.rp_api_key: - self.rp_api_key = getenv( - 'RP_UUID') or self.find_option(pytest_config, 'rp_uuid') + self.rp_api_key = getenv("RP_UUID") or self.find_option(pytest_config, "rp_uuid") if self.rp_api_key: warnings.warn( - 'Parameter `rp_uuid` is deprecated since 5.1.9 ' - 'and will be subject for removing in the next ' - 'major version. Use `rp_api_key` argument ' - 'instead.', + "Parameter `rp_uuid` is deprecated since 5.1.9 " + "and will be subject for removing in the next " + "major version. Use `rp_api_key` argument " + "instead.", DeprecationWarning, - 2 + 2, ) else: warnings.warn( - 'Argument `rp_api_key` is `None` or empty string, ' - 'that is not supposed to happen because Report ' - 'Portal is usually requires an authorization key. ' - 'Please check your configuration.', + "Argument `rp_api_key` is `None` or empty string, " + "that is not supposed to happen because Report " + "Portal is usually requires an authorization key. 
" + "Please check your configuration.", RuntimeWarning, - 2 + 2, ) - rp_verify_ssl = self.find_option(pytest_config, 'rp_verify_ssl', True) + rp_verify_ssl = self.find_option(pytest_config, "rp_verify_ssl", True) try: self.rp_verify_ssl = to_bool(rp_verify_ssl) except (ValueError, AttributeError): self.rp_verify_ssl = rp_verify_ssl - self.rp_launch_timeout = int(self.find_option(pytest_config, 'rp_launch_timeout')) + self.rp_launch_timeout = int(self.find_option(pytest_config, "rp_launch_timeout")) - self.rp_launch_uuid_print = to_bool(self.find_option(pytest_config, 'rp_launch_uuid_print') or 'False') - print_output = self.find_option(pytest_config, 'rp_launch_uuid_print_output') + self.rp_launch_uuid_print = to_bool(self.find_option(pytest_config, "rp_launch_uuid_print") or "False") + print_output = self.find_option(pytest_config, "rp_launch_uuid_print_output") self.rp_launch_uuid_print_output = OutputType[print_output.upper()] if print_output else None - client_type = self.find_option(pytest_config, 'rp_client_type') + client_type = self.find_option(pytest_config, "rp_client_type") self.rp_client_type = ClientType[client_type.upper()] if client_type else ClientType.SYNC - connect_timeout = self.find_option(pytest_config, 'rp_connect_timeout') + connect_timeout = self.find_option(pytest_config, "rp_connect_timeout") connect_timeout = float(connect_timeout) if connect_timeout else None - read_timeout = self.find_option(pytest_config, 'rp_read_timeout') + read_timeout = self.find_option(pytest_config, "rp_read_timeout") read_timeout = float(read_timeout) if read_timeout else None if connect_timeout is None and read_timeout is None: self.rp_http_timeout = None @@ -180,7 +179,7 @@ def __init__(self, pytest_config: Config) -> None: self.rp_http_timeout = (connect_timeout, read_timeout) else: self.rp_http_timeout = connect_timeout or read_timeout - self.rp_report_fixtures = to_bool(self.find_option(pytest_config, 'rp_report_fixtures', False)) + self.rp_report_fixtures = to_bool(self.find_option(pytest_config, "rp_report_fixtures", False)) # noinspection PyMethodMayBeStatic def find_option(self, pytest_config: Config, option_name: str, default: Any = None) -> Any: @@ -197,7 +196,7 @@ def find_option(self, pytest_config: Config, option_name: str, default: Any = No :param default: value to be returned if not found :return: option value """ - value = (getattr(pytest_config.option, option_name, None) or pytest_config.getini(option_name)) + value = getattr(pytest_config.option, option_name, None) or pytest_config.getini(option_name) if isinstance(value, bool): return value return value or default diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index 4f6d462..84832c2 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -17,34 +17,49 @@ import os.path import time from logging import Logger -from typing import Any +from typing import Any, Callable, Dict, Generator import _pytest.logging import dill as pickle import pytest + # noinspection PyPackageRequirements import requests -from pytest import Session, Item -from reportportal_client import RPLogHandler, RP +from pytest import Item, Session +from reportportal_client import RP, RPLogHandler from reportportal_client.errors import ResponseError from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE from pytest_reportportal import LAUNCH_WAIT_TIMEOUT from pytest_reportportal.config import AgentConfig from pytest_reportportal.rp_logging import patching_logger_class, patching_thread_class 
-from pytest_reportportal.service import PyTestServiceClass +from pytest_reportportal.service import PyTestService + +try: + # noinspection PyPackageRequirements + from pytest_bdd.parser import Feature, Scenario, Step + + PYTEST_BDD = True +except ImportError: + Feature = type("dummy", (), {}) + Scenario = type("dummy", (), {}) + Step = type("dummy", (), {}) + PYTEST_BDD = False -log: Logger = logging.getLogger(__name__) +LOGGER: Logger = logging.getLogger(__name__) -MANDATORY_PARAMETER_MISSED_PATTERN: str = \ - 'One of the following mandatory parameters is unset: ' + \ - 'rp_project: {}, ' + \ - 'rp_endpoint: {}, ' + \ - 'rp_api_key: {}' +MANDATORY_PARAMETER_MISSED_PATTERN: str = ( + "One of the following mandatory parameters is unset: " + + "rp_project: {}, " + + "rp_endpoint: {}, " + + "rp_api_key: {}" +) -FAILED_LAUNCH_WAIT: str = 'Failed to initialize reportportal-client service. ' \ - + 'Waiting for Launch start timed out. ' \ - + 'Reporting is disabled.' +FAILED_LAUNCH_WAIT: str = ( + "Failed to initialize reportportal-client service. " + + "Waiting for Launch start timed out. " + + "Reporting is disabled." +) @pytest.hookimpl(optionalhook=True) @@ -57,7 +72,7 @@ def pytest_configure_node(node: Any) -> None: if not node.config._rp_enabled: # Stop now if the plugin is not properly configured return - node.workerinput['py_test_service'] = pickle.dumps(node.config.py_test_service) + node.workerinput["py_test_service"] = pickle.dumps(node.config.py_test_service) # no 'config' type for backward compatibility for older pytest versions @@ -67,7 +82,7 @@ def is_control(config) -> bool: True if the code, running the given pytest.config object, is running as the xdist control node or not running xdist at all. """ - return not hasattr(config, 'workerinput') + return not hasattr(config, "workerinput") def wait_launch(rp_client: RP) -> bool: @@ -99,19 +114,17 @@ def pytest_sessionstart(session: Session) -> None: try: config.py_test_service.start() except ResponseError as response_error: - log.warning('Failed to initialize reportportal-client service. ' - 'Reporting is disabled.') - log.debug(str(response_error)) + LOGGER.warning("Failed to initialize reportportal-client service. " "Reporting is disabled.") + LOGGER.debug(str(response_error)) config.py_test_service.rp = None config._rp_enabled = False return if is_control(config): config.py_test_service.start_launch() - if config.pluginmanager.hasplugin('xdist') \ - or config.pluginmanager.hasplugin('pytest-parallel'): + if config.pluginmanager.hasplugin("xdist") or config.pluginmanager.hasplugin("pytest-parallel"): if not wait_launch(session.config.py_test_service.rp): - log.error(FAILED_LAUNCH_WAIT) + LOGGER.error(FAILED_LAUNCH_WAIT) config.py_test_service.rp = None config._rp_enabled = False @@ -154,20 +167,19 @@ def register_markers(config) -> None: :param config: Object of the pytest Config class """ config.addinivalue_line( - "markers", "issue(issue_id, reason, issue_type, url): mark test with " - "information about skipped or failed result" + "markers", + "issue(issue_id, reason, issue_type, url): mark test with " "information about skipped or failed result", ) config.addinivalue_line( - "markers", "tc_id(id, parameterized, params): report the test" - "case with a custom Test Case ID. 
Parameters: \n" - "parameterized [True / False] - use parameter values in " - "Test Case ID generation \n" - "params [parameter names as list] - use only specified" - "parameters" - ) - config.addinivalue_line( - "markers", "name(name): report the test case with a custom Name." + "markers", + "tc_id(id, parameterized, params): report the test" + "case with a custom Test Case ID. Parameters: \n" + "parameterized [True / False] - use parameter values in " + "Test Case ID generation \n" + "params [parameter names as list] - use only specified" + "parameters", ) + config.addinivalue_line("markers", "name(name): report the test case with a custom Name.") def check_connection(agent_config: AgentConfig): @@ -177,15 +189,15 @@ def check_connection(agent_config: AgentConfig): :param agent_config: Instance of the AgentConfig class :return True on successful connection check, either False """ - url = '{0}/api/v1/project/{1}'.format(agent_config.rp_endpoint, agent_config.rp_project) - headers = {'Authorization': 'bearer {0}'.format(agent_config.rp_api_key)} + url = "{0}/api/v1/project/{1}".format(agent_config.rp_endpoint, agent_config.rp_project) + headers = {"Authorization": "bearer {0}".format(agent_config.rp_api_key)} try: resp = requests.get(url, headers=headers, verify=agent_config.rp_verify_ssl) resp.raise_for_status() return True except requests.exceptions.RequestException as exc: - log.exception(exc) - log.error("Unable to connect to Report Portal, the launch won't be reported") + LOGGER.exception(exc) + LOGGER.error("Unable to connect to Report Portal, the launch won't be reported") return False @@ -199,42 +211,41 @@ def pytest_configure(config) -> None: register_markers(config) config._rp_enabled = not ( - config.getoption('--collect-only', default=False) or - config.getoption('--setup-plan', default=False) or - not config.option.rp_enabled) + config.getoption("--collect-only", default=False) + or config.getoption("--setup-plan", default=False) + or not config.option.rp_enabled + ) if not config._rp_enabled: return agent_config = AgentConfig(config) - cond = (agent_config.rp_project, agent_config.rp_endpoint, - agent_config.rp_api_key) + cond = (agent_config.rp_project, agent_config.rp_endpoint, agent_config.rp_api_key) config._rp_enabled = all(cond) if not config._rp_enabled: - log.debug(MANDATORY_PARAMETER_MISSED_PATTERN.format(*cond)) - log.debug('Disabling reporting to RP.') + LOGGER.debug(MANDATORY_PARAMETER_MISSED_PATTERN.format(*cond)) + LOGGER.debug("Disabling reporting to RP.") return if not agent_config.rp_skip_connection_test: config._rp_enabled = check_connection(agent_config) if not config._rp_enabled: - log.debug('Failed to establish connection with RP. ' - 'Disabling reporting.') + LOGGER.debug("Failed to establish connection with RP. " "Disabling reporting.") return config._reporter_config = agent_config if is_control(config): - config.py_test_service = PyTestServiceClass(agent_config) + config.py_test_service = PyTestService(agent_config) else: # noinspection PyUnresolvedReferences - config.py_test_service = pickle.loads(config.workerinput['py_test_service']) + config.py_test_service = pickle.loads(config.workerinput["py_test_service"]) # noinspection PyProtectedMember @pytest.hookimpl(hookwrapper=True) -def pytest_runtestloop(session: Session) -> None: +def pytest_runtestloop(session: Session) -> Generator[None, Any, None]: """ Control start and finish of all test items in the session. 
@@ -253,7 +264,7 @@ def pytest_runtestloop(session: Session) -> None: # noinspection PyProtectedMember @pytest.hookimpl(hookwrapper=True) -def pytest_runtest_protocol(item: Item) -> None: +def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: """Control start and finish of pytest items. :param item: Pytest.Item @@ -267,38 +278,41 @@ def pytest_runtest_protocol(item: Item) -> None: service = config.py_test_service agent_config = config._reporter_config service.start_pytest_item(item) + log_level = agent_config.rp_log_level or logging.NOTSET log_handler = RPLogHandler( - level=log_level, filter_client_logs=True, endpoint=agent_config.rp_endpoint, - ignored_record_names=('reportportal_client', 'pytest_reportportal')) + level=log_level, + filter_client_logs=True, + endpoint=agent_config.rp_endpoint, + ignored_record_names=("reportportal_client", "pytest_reportportal"), + ) log_format = agent_config.rp_log_format if log_format: log_handler.setFormatter(logging.Formatter(log_format)) with patching_logger_class(): with _pytest.logging.catching_logs(log_handler, level=log_level): yield + service.finish_pytest_item(item) # noinspection PyProtectedMember @pytest.hookimpl(hookwrapper=True) -def pytest_runtest_makereport(item: Item) -> None: +def pytest_runtest_makereport(item: Item) -> Generator[None, Any, None]: """Change runtest_makereport function. :param item: pytest.Item :return: None """ - config = item.config - if not config._rp_enabled: - yield + result = yield + if not item.config._rp_enabled: return - - report = (yield).get_result() + report = result.get_result() service = item.config.py_test_service service.process_results(item, report) -def report_fixture(request, fixturedef, name: str, error_msg: str) -> None: +def report_fixture(request, fixturedef, name: str, error_msg: str) -> Generator[None, Any, None]: """Report fixture setup and teardown. :param request: Object of the FixtureRequest class @@ -307,18 +321,18 @@ def report_fixture(request, fixturedef, name: str, error_msg: str) -> None: :param error_msg: Error message """ config = request.config - enabled = getattr(config, '_rp_enabled', False) - service = getattr(config, 'py_test_service', None) - agent_config = getattr(config, '_reporter_config', object()) - report_fixtures = getattr(agent_config, 'rp_report_fixtures', False) + enabled = getattr(config, "_rp_enabled", False) + service = getattr(config, "py_test_service", None) + agent_config = getattr(config, "_reporter_config", object()) + report_fixtures = getattr(agent_config, "rp_report_fixtures", False) if not enabled or not service or not report_fixtures: yield return - cached_result = getattr(fixturedef, 'cached_result', None) - if cached_result and hasattr(cached_result, '__getitem__'): + cached_result = getattr(fixturedef, "cached_result", None) + if cached_result and hasattr(cached_result, "__getitem__"): result = fixturedef.cached_result[2] - if hasattr(result, '__getitem__'): + if hasattr(result, "__getitem__"): result = result[0] if result and isinstance(result, BaseException): yield @@ -329,28 +343,178 @@ def report_fixture(request, fixturedef, name: str, error_msg: str) -> None: # no types for backward compatibility for older pytest versions @pytest.hookimpl(hookwrapper=True) -def pytest_fixture_setup(fixturedef, request) -> None: +def pytest_fixture_setup(fixturedef, request) -> Generator[None, Any, None]: """Report fixture setup. 
:param fixturedef: represents definition of the texture class :param request: represents fixture execution metadata """ yield from report_fixture( - request, fixturedef, f'{fixturedef.scope} fixture setup: {fixturedef.argname}', - f'{fixturedef.scope} fixture setup failed: {fixturedef.argname}') + request, + fixturedef, + f"{fixturedef.scope} fixture setup: {fixturedef.argname}", + f"{fixturedef.scope} fixture setup failed: {fixturedef.argname}", + ) # no types for backward compatibility for older pytest versions @pytest.hookimpl(hookwrapper=True) -def pytest_fixture_post_finalizer(fixturedef, request) -> None: +def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[None, Any, None]: """Report fixture teardown. :param fixturedef: represents definition of the texture class :param request: represents fixture execution metadata """ yield from report_fixture( - request, fixturedef, f'{fixturedef.scope} fixture teardown: {fixturedef.argname}', - f'{fixturedef.scope} fixture teardown failed: {fixturedef.argname}') + request, + fixturedef, + f"{fixturedef.scope} fixture teardown: {fixturedef.argname}", + f"{fixturedef.scope} fixture teardown failed: {fixturedef.argname}", + ) + + +if PYTEST_BDD: + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: + """Report BDD scenario start. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + """ + config = request.config + # noinspection PyProtectedMember + if not config._rp_enabled: + yield + return + service = config.py_test_service + service.start_bdd_scenario(feature, scenario) + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: + """Report BDD scenario finish. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + """ + config = request.config + # noinspection PyProtectedMember + if not config._rp_enabled: + yield + return + + yield + service = config.py_test_service + service.finish_bdd_scenario(feature, scenario) + + # noinspection PyUnusedLocal + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_before_step( + request, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any] + ) -> Generator[None, Any, None]: + """Report BDD step start. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param step_func: represents function for step + """ + config = request.config + # noinspection PyProtectedMember + if not config._rp_enabled: + yield + return + + service = config.py_test_service + service.start_bdd_step(feature, scenario, step) + yield + + # noinspection PyUnusedLocal + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_after_step( + request, + feature: Feature, + scenario: Scenario, + step: Step, + step_func: Callable[..., Any], + step_func_args: Dict[str, Any], + ) -> Generator[None, Any, None]: + """Report BDD step finish. 
+ + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param step_func: represents function for step + :param step_func_args: represents arguments for step function + """ + config = request.config + # noinspection PyProtectedMember + if not config._rp_enabled: + yield + return + + yield + service = config.py_test_service + service.finish_bdd_step(feature, scenario, step) + + # noinspection PyUnusedLocal + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_step_error( + request, + feature: Feature, + scenario: Scenario, + step: Step, + step_func: Callable[..., Any], + step_func_args: Dict[str, Any], + exception, + ) -> Generator[None, Any, None]: + """Report BDD step error. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param step_func: represents function for step + :param step_func_args: represents arguments for step function + :param exception: represents exception + """ + config = request.config + # noinspection PyProtectedMember + if not config._rp_enabled: + yield + return + + yield + service = config.py_test_service + service.finish_bdd_step_error(feature, scenario, step, exception) + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_step_func_lookup_error( + request, feature: Feature, scenario: Scenario, step: Step, exception + ) -> Generator[None, Any, None]: + """Report BDD step lookup error. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param exception: represents exception + """ + config = request.config + # noinspection PyProtectedMember + if not config._rp_enabled: + yield + return + + service = config.py_test_service + service.start_bdd_step(feature, scenario, step) + yield + service.finish_bdd_step_error(feature, scenario, step, exception) # no types for backward compatibility for older pytest versions @@ -359,9 +523,9 @@ def pytest_addoption(parser) -> None: :param parser: Object of the Parser class """ - group = parser.getgroup('reporting') + group = parser.getgroup("reporting") - def add_shared_option(name, help_str, default=None, action='store'): + def add_shared_option(name, help_str, default=None, action="store"): """ Add an option to both the command line and the .ini file. @@ -378,208 +542,149 @@ def add_shared_option(name, help_str, default=None, action='store'): help=help_str, ) group.addoption( - '--{0}'.format(name.replace('_', '-')), + "--{0}".format(name.replace("_", "-")), action=action, dest=name, - help='{help} (overrides {name} config option)'.format( + help="{help} (overrides {name} config option)".format( help=help_str, name=name, ), ) group.addoption( - '--reportportal', - action='store_true', - dest='rp_enabled', - default=False, - help='Enable ReportPortal plugin' + "--reportportal", action="store_true", dest="rp_enabled", default=False, help="Enable ReportPortal plugin" ) add_shared_option( - name='rp_launch', - help_str='Launch name', - default='Pytest Launch', + name="rp_launch", + help_str="Launch name", + default="Pytest Launch", ) add_shared_option( - name='rp_launch_id', - help_str='Use already existing launch-id. 
The plugin won\'t control ' - 'the Launch status', + name="rp_launch_id", + help_str="Use already existing launch-id. The plugin won't control " "the Launch status", ) add_shared_option( - name='rp_launch_description', - help_str='Launch description', - default='', + name="rp_launch_description", + help_str="Launch description", + default="", ) - add_shared_option(name='rp_project', help_str='Project name') + add_shared_option(name="rp_project", help_str="Project name") add_shared_option( - name='rp_log_level', - help_str='Logging level for automated log records reporting', + name="rp_log_level", + help_str="Logging level for automated log records reporting", ) add_shared_option( - name='rp_log_format', - help_str='Logging format for automated log records reporting', + name="rp_log_format", + help_str="Logging format for automated log records reporting", ) add_shared_option( - name='rp_rerun', - help_str='Marks the launch as a rerun', + name="rp_rerun", + help_str="Marks the launch as a rerun", default=False, - action='store_true', - ) - add_shared_option( - name='rp_rerun_of', - help_str='ID of the launch to be marked as a rerun (use only with ' - 'rp_rerun=True)', - default='', - ) - add_shared_option( - name='rp_parent_item_id', - help_str='Create all test item as child items of the given (already ' - 'existing) item.', + action="store_true", ) - add_shared_option(name='rp_uuid', help_str='Deprecated: use `rp_api_key` ' - 'instead.') add_shared_option( - name='rp_api_key', - help_str='API key of Report Portal. Usually located on UI profile ' - 'page.' + name="rp_rerun_of", + help_str="ID of the launch to be marked as a rerun (use only with " "rp_rerun=True)", + default="", ) - add_shared_option(name='rp_endpoint', help_str='Server endpoint') add_shared_option( - name='rp_mode', - help_str='Visibility of current launch [DEFAULT, DEBUG]', - default='DEFAULT' + name="rp_parent_item_id", + help_str="Create all test item as child items of the given (already " "existing) item.", ) + add_shared_option(name="rp_uuid", help_str="Deprecated: use `rp_api_key` " "instead.") + add_shared_option(name="rp_api_key", help_str="API key of Report Portal. Usually located on UI profile " "page.") + add_shared_option(name="rp_endpoint", help_str="Server endpoint") + add_shared_option(name="rp_mode", help_str="Visibility of current launch [DEFAULT, DEBUG]", default="DEFAULT") add_shared_option( - name='rp_thread_logging', - help_str='EXPERIMENTAL: Report logs from threads. ' - 'This option applies a patch to the builtin Thread class, ' - 'and so it is turned off by default. Use with caution.', + name="rp_thread_logging", + help_str="EXPERIMENTAL: Report logs from threads. " + "This option applies a patch to the builtin Thread class, " + "and so it is turned off by default. Use with caution.", default=False, - action='store_true' + action="store_true", ) add_shared_option( - name='rp_launch_uuid_print', - help_str='Enables printing Launch UUID on test run start. Possible values: [True, False]' + name="rp_launch_uuid_print", + help_str="Enables printing Launch UUID on test run start. Possible values: [True, False]", ) add_shared_option( - name='rp_launch_uuid_print_output', - help_str='Launch UUID print output. Default `stdout`. Possible values: [stderr, stdout]' + name="rp_launch_uuid_print_output", + help_str="Launch UUID print output. Default `stdout`. 
Possible values: [stderr, stdout]", ) + parser.addini("rp_launch_attributes", type="args", help="Launch attributes, i.e Performance Regression") + parser.addini("rp_tests_attributes", type="args", help="Attributes for all tests items, e.g. Smoke") + parser.addini("rp_log_batch_size", default="20", help="Size of batch log requests in async mode") parser.addini( - 'rp_launch_attributes', - type='args', - help='Launch attributes, i.e Performance Regression') - parser.addini( - 'rp_tests_attributes', - type='args', - help='Attributes for all tests items, e.g. Smoke') - parser.addini( - 'rp_log_batch_size', - default='20', - help='Size of batch log requests in async mode') - parser.addini( - 'rp_log_batch_payload_size', + "rp_log_batch_payload_size", default=str(MAX_LOG_BATCH_PAYLOAD_SIZE), - help='Maximum payload size in bytes of async batch log requests') - parser.addini( - 'rp_ignore_attributes', - type='args', - help='Ignore specified pytest markers, i.e parametrize') - parser.addini( - 'rp_is_skipped_an_issue', - default=True, - type='bool', - help='Treat skipped tests as required investigation') - parser.addini( - 'rp_hierarchy_code', - default=False, - type='bool', - help='Enables hierarchy for code') - parser.addini( - 'rp_hierarchy_dirs_level', - default='0', - help='Directory starting hierarchy level') + help="Maximum payload size in bytes of async batch log requests", + ) + parser.addini("rp_ignore_attributes", type="args", help="Ignore specified pytest markers, i.e parametrize") parser.addini( - 'rp_hierarchy_dirs', - default=False, - type='bool', - help='Enables hierarchy for directories') + "rp_is_skipped_an_issue", default=True, type="bool", help="Treat skipped tests as required investigation" + ) + parser.addini("rp_hierarchy_code", default=False, type="bool", help="Enables hierarchy for code") + parser.addini("rp_hierarchy_dirs_level", default="0", help="Directory starting hierarchy level") + parser.addini("rp_hierarchy_dirs", default=False, type="bool", help="Enables hierarchy for directories") parser.addini( - 'rp_hierarchy_dir_path_separator', + "rp_hierarchy_dir_path_separator", default=os.path.sep, - help='Path separator to display directories in test hierarchy') - parser.addini( - 'rp_hierarchy_test_file', - default=True, - type='bool', - help='Show file name in hierarchy') - parser.addini( - 'rp_issue_system_url', - default='', - help='URL to get issue description. Issue id from pytest mark will be added to this URL. ' - 'Deprecated: use "rp_bts_issue_url".') - parser.addini( - 'rp_bts_issue_url', - default='', - help='URL to get issue description. Issue ID from pytest mark will be added to this URL by replacing ' - '"{issue_id}" placeholder.') - parser.addini( - 'rp_bts_project', - default='', - help='Bug-tracking system project as it configured on Report Portal ' - 'server. To enable runtime external issue reporting you need to ' - 'specify this and "rp_bts_url" property.') - parser.addini( - 'rp_bts_url', - default='', - help='URL of bug-tracking system as it configured on Report Portal ' - 'server. 
To enable runtime external issue reporting you need to ' - 'specify this and "rp_bts_project" property.') - parser.addini( - 'rp_verify_ssl', - default='True', - help='True/False - verify HTTPS calls, or path to a CA_BUNDLE or ' - 'directory with certificates of trusted CAs.') - parser.addini( - 'rp_issue_id_marks', - type='bool', - default=True, - help='Add tag with issue id to the test') + help="Path separator to display directories in test hierarchy", + ) + parser.addini("rp_hierarchy_test_file", default=True, type="bool", help="Show file name in hierarchy") parser.addini( - 'retries', - default='0', - help='Deprecated: use `rp_api_retries` instead') + "rp_issue_system_url", + default="", + help="URL to get issue description. Issue id from pytest mark will be added to this URL. " + 'Deprecated: use "rp_bts_issue_url".', + ) parser.addini( - 'rp_api_retries', - default='0', - help='Amount of retries for performing REST calls to RP server') + "rp_bts_issue_url", + default="", + help="URL to get issue description. Issue ID from pytest mark will be added to this URL by replacing " + '"{issue_id}" placeholder.', + ) parser.addini( - 'rp_skip_connection_test', - default=False, - type='bool', - help='Skip Report Portal connection test') + "rp_bts_project", + default="", + help="Bug-tracking system project as it configured on Report Portal " + "server. To enable runtime external issue reporting you need to " + 'specify this and "rp_bts_url" property.', + ) parser.addini( - 'rp_launch_timeout', - default=86400, - help='Maximum time to wait for child processes finish, default value: ' - '86400 seconds (1 day)' + "rp_bts_url", + default="", + help="URL of bug-tracking system as it configured on Report Portal " + "server. To enable runtime external issue reporting you need to " + 'specify this and "rp_bts_project" property.', ) parser.addini( - 'rp_client_type', - help='Type of the under-the-hood ReportPortal client implementation. Possible values: [SYNC, ASYNC_THREAD, ' - 'ASYNC_BATCHED]' + "rp_verify_ssl", + default="True", + help="True/False - verify HTTPS calls, or path to a CA_BUNDLE or " + "directory with certificates of trusted CAs.", ) + parser.addini("rp_issue_id_marks", type="bool", default=True, help="Add tag with issue id to the test") + parser.addini("retries", default="0", help="Deprecated: use `rp_api_retries` instead") + parser.addini("rp_api_retries", default="0", help="Amount of retries for performing REST calls to RP server") + parser.addini("rp_skip_connection_test", default=False, type="bool", help="Skip Report Portal connection test") parser.addini( - 'rp_connect_timeout', - help='Connection timeout to ReportPortal server' + "rp_launch_timeout", + default=86400, + help="Maximum time to wait for child processes finish, default value: " "86400 seconds (1 day)", ) parser.addini( - 'rp_read_timeout', - help='Response read timeout for ReportPortal connection' + "rp_client_type", + help="Type of the under-the-hood ReportPortal client implementation. Possible values: [SYNC, ASYNC_THREAD, " + "ASYNC_BATCHED]", ) + parser.addini("rp_connect_timeout", help="Connection timeout to ReportPortal server") + parser.addini("rp_read_timeout", help="Response read timeout for ReportPortal connection") parser.addini( - 'rp_report_fixtures', + "rp_report_fixtures", default=False, - type='bool', - help='Enable reporting fixtures as test items. Possible values: [True, False]' + type="bool", + help="Enable reporting fixtures as test items. 
Possible values: [True, False]", ) diff --git a/pytest_reportportal/rp_logging.py b/pytest_reportportal/rp_logging.py index 650082f..a53ce81 100644 --- a/pytest_reportportal/rp_logging.py +++ b/pytest_reportportal/rp_logging.py @@ -13,25 +13,24 @@ """RPLogger class for low-level logging in tests.""" -import sys import logging +import sys import threading from contextlib import contextmanager from functools import wraps -from typing import Any +from typing import Any, Dict, List -from reportportal_client import current, set_current -from reportportal_client import RPLogger +from reportportal_client import RPLogger, current, set_current from reportportal_client.core.worker import APIWorker def is_api_worker(target): """Check if target is an RP worker thread.""" if target: - method_name = getattr(target, '__name__', None) - method_self = getattr(target, '__self__', None) - if method_name == '_monitor' and method_self: - clazz = getattr(method_self, '__class__', None) + method_name = getattr(target, "__name__", None) + method_self = getattr(target, "__self__", None) + if method_name == "_monitor" and method_self: + clazz = getattr(method_self, "__class__", None) if clazz is APIWorker: return True return False @@ -51,12 +50,13 @@ def patching_thread_class(config): original_start = threading.Thread.start original_run = threading.Thread.run try: + def wrap_start(original_func): @wraps(original_func) def _start(self, *args, **kwargs): """Save the invoking thread's client if there is one.""" # Prevent an endless loop of workers being spawned - target = getattr(self, '_target', None) + target = getattr(self, "_target", None) if not is_api_worker(self) and not is_api_worker(target): current_client = current() self.parent_rp_client = current_client @@ -69,11 +69,7 @@ def wrap_run(original_func): def _run(self, *args, **kwargs): """Create a new client for the invoked thread.""" client = None - if ( - hasattr(self, "parent_rp_client") - and self.parent_rp_client - and not current() - ): + if hasattr(self, "parent_rp_client") and self.parent_rp_client and not current(): parent = self.parent_rp_client client = parent.clone() try: @@ -115,44 +111,43 @@ def patching_logger_class(): original_makeRecord = logger_class.makeRecord try: + def wrap_log(original_func): @wraps(original_func) - def _log(self, *args: list[Any], **kwargs: dict[str, Any]): + def _log(self, *args: List[Any], **kwargs: Dict[str, Any]): my_kwargs = kwargs.copy() - attachment = my_kwargs.pop('attachment', None) + attachment = my_kwargs.pop("attachment", None) if attachment is not None: - my_kwargs.setdefault('extra', {}).update({'attachment': attachment}) + my_kwargs.setdefault("extra", {}).update({"attachment": attachment}) # Python 3.11 start catches stack frames in wrappers, # so add additional stack level skip to not show it if sys.version_info >= (3, 11): - if 'stacklevel' in my_kwargs: - my_kwargs['stacklevel'] = my_kwargs['stacklevel'] + 1 + if "stacklevel" in my_kwargs: + my_kwargs["stacklevel"] = my_kwargs["stacklevel"] + 1 else: - my_kwargs['stacklevel'] = 2 + my_kwargs["stacklevel"] = 2 return original_func(self, *args, **my_kwargs) else: return original_func(self, *args, **my_kwargs) + return _log def wrap_makeRecord(original_func): @wraps(original_func) - def makeRecord(self, name, level, fn, lno, msg, args, exc_info, - func=None, extra=None, sinfo=None): + def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): if extra is not None: - attachment = extra.pop('attachment', None) + 
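# Illustrative sketch, not part of this patch: the options registered above via
# parser.addini() are ordinary pytest ini options, so besides pytest.ini they can be
# supplied per run with "-o" overrides (the same mechanism the test helpers later in
# this diff use). Option names come from the addini() calls; the values and the
# "tests/" path are made-up examples.
import pytest

if __name__ == "__main__":
    pytest.main(
        [
            "--reportportal",
            "-o", "rp_hierarchy_dirs=True",
            "-o", "rp_hierarchy_code=True",
            "-o", "rp_report_fixtures=True",
            "-o", "rp_api_retries=3",
            "tests/",
        ]
    )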
attachment = extra.pop("attachment", None) else: attachment = None try: # Python 3.5 - record = original_func(self, name, level, fn, lno, msg, - args, exc_info, func=func, - extra=extra, sinfo=sinfo) + record = original_func( + self, name, level, fn, lno, msg, args, exc_info, func=func, extra=extra, sinfo=sinfo + ) except TypeError: # Python 2.7 - record = original_func(self, name, level, fn, lno, msg, - args, exc_info, func=func, - extra=extra) + record = original_func(self, name, level, fn, lno, msg, args, exc_info, func=func, extra=extra) record.attachment = attachment return record diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 31a593f..43be6ad 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -15,20 +15,23 @@ import logging import os.path +import re import sys import threading +import traceback +from collections import OrderedDict from functools import wraps from os import curdir -from time import time, sleep -from typing import List, Any, Optional, Set, Dict, Tuple, Union, Callable +from time import sleep, time +from typing import Any, Callable, Dict, Generator, List, Optional, Set, Union from _pytest.doctest import DoctestItem -from aenum import auto, Enum, unique +from aenum import Enum, auto, unique from py.path import local -from pytest import Class, Function, Module, Package, Item, Session, PytestWarning +from pytest import Class, Function, Item, Module, Package, PytestWarning, Session from reportportal_client.aio import Task -from reportportal_client.core.rp_issues import Issue, ExternalIssue -from reportportal_client.helpers import timestamp +from reportportal_client.core.rp_issues import ExternalIssue, Issue +from reportportal_client.helpers import markdown_helpers, timestamp from .config import AgentConfig @@ -37,30 +40,57 @@ from pytest import Instance except ImportError: # in pytest >= 7.0 this type was removed - Instance = type('dummy', (), {}) + Instance = type("dummy", (), {}) try: from pytest import Dir except ImportError: # in pytest < 8.0 there is no such type - Dir = type('dummy', (), {}) + Dir = type("dummy", (), {}) +try: + from pytest import Mark +except ImportError: + # in old pytest marks are located in the _pytest.mark module + from _pytest.mark import Mark +try: + # noinspection PyPackageRequirements + from pytest_bdd.parser import Background, Feature, Scenario, ScenarioTemplate, Step + + # noinspection PyPackageRequirements + from pytest_bdd.scenario import make_python_name + + PYTEST_BDD = True +except ImportError: + Background = type("dummy", (), {}) + Feature = type("dummy", (), {}) + Scenario = type("dummy", (), {}) + ScenarioTemplate = type("dummy", (), {}) + Step = type("dummy", (), {}) + make_python_name: Callable[[str], str] = lambda x: x + PYTEST_BDD = False + +try: + # noinspection PyPackageRequirements + from pytest_bdd.parser import Rule +except ImportError: + Rule = type("dummy", (), {}) # Old pytest-bdd versions do not have Rule + from reportportal_client import RP, create_client -from reportportal_client.helpers import ( - dict_to_payload, - gen_attributes, - get_launch_sys_attrs, - get_package_version -) +from reportportal_client.helpers import dict_to_payload, gen_attributes, get_launch_sys_attrs, get_package_version LOGGER = logging.getLogger(__name__) +KNOWN_LOG_LEVELS = ("TRACE", "DEBUG", "INFO", "WARN", "ERROR") MAX_ITEM_NAME_LENGTH: int = 1024 -TRUNCATION_STR: str = '...' +TRUNCATION_STR: str = "..." 
ROOT_DIR: str = str(os.path.abspath(curdir)) -PYTEST_MARKS_IGNORE: Set[str] = {'parametrize', 'usefixtures', 'filterwarnings'} -NOT_ISSUE: Issue = Issue('NOT_ISSUE') -ISSUE_DESCRIPTION_LINE_TEMPLATE: str = '* {}:{}' -ISSUE_DESCRIPTION_URL_TEMPLATE: str = ' [{issue_id}]({url})' -ISSUE_DESCRIPTION_ID_TEMPLATE: str = ' {issue_id}' +PYTEST_MARKS_IGNORE: Set[str] = {"parametrize", "usefixtures", "filterwarnings"} +NOT_ISSUE: Issue = Issue("NOT_ISSUE") +ISSUE_DESCRIPTION_LINE_TEMPLATE: str = "* {}:{}" +ISSUE_DESCRIPTION_URL_TEMPLATE: str = " [{issue_id}]({url})" +ISSUE_DESCRIPTION_ID_TEMPLATE: str = " {issue_id}" +PYTHON_REPLACE_REGEX = re.compile(r"\W") +ALPHA_REGEX = re.compile(r"^\d+_*") +BACKGROUND_STEP_NAME = "Background" def trim_docstring(docstring: str) -> str: @@ -71,7 +101,7 @@ def trim_docstring(docstring: str) -> str: :return: trimmed docstring """ if not docstring: - return '' + return "" # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() @@ -92,7 +122,7 @@ def trim_docstring(docstring: str) -> str: while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: - return '\n'.join(trimmed) + return "\n".join(trimmed) @unique @@ -103,6 +133,8 @@ class LeafType(Enum): FILE = auto() CODE = auto() ROOT = auto() + SUITE = auto() + NESTED = auto() @unique @@ -119,7 +151,7 @@ def check_rp_enabled(func): @wraps(func) def wrap(*args, **kwargs): - if args and isinstance(args[0], PyTestServiceClass): + if args and isinstance(args[0], PyTestService): if not args[0].rp: return return func(*args, **kwargs) @@ -127,13 +159,16 @@ def wrap(*args, **kwargs): return wrap -class PyTestServiceClass: +class PyTestService: """Pytest service class for reporting test results to the Report Portal.""" _config: AgentConfig _issue_types: Dict[str, str] - _tree_path: Dict[Item, List[Dict[str, Any]]] - _log_levels: Tuple[str, str, str, str, str] + _tree_path: Dict[Any, List[Dict[str, Any]]] + _bdd_tree: Optional[Dict[str, Any]] + _bdd_item_by_name: Dict[str, Item] + _bdd_scenario_by_item: Dict[Item, Scenario] + _bdd_item_by_scenario: Dict[Scenario, Item] _start_tracker: Set[str] _launch_id: Optional[str] agent_name: str @@ -148,10 +183,13 @@ def __init__(self, agent_config: AgentConfig) -> None: self._config = agent_config self._issue_types = {} self._tree_path = {} - self._log_levels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR') + self._bdd_tree = None + self._bdd_item_by_name = OrderedDict() + self._bdd_scenario_by_item = {} + self._bdd_item_by_scenario = {} self._start_tracker = set() self._launch_id = None - self.agent_name = 'pytest-reportportal' + self.agent_name = "pytest-reportportal" self.agent_version = get_package_version(self.agent_name) self.ignored_attributes = [] self.parent_item_id = None @@ -182,8 +220,7 @@ def _get_launch_attributes(self, ini_attrs: Optional[List[Dict[str, str]]]) -> L """ attributes = ini_attrs or [] system_attributes = get_launch_sys_attrs() - system_attributes['agent'] = ( - '{}|{}'.format(self.agent_name, self.agent_version)) + system_attributes["agent"] = "{}|{}".format(self.agent_name, self.agent_version) return attributes + dict_to_payload(system_attributes) def _build_start_launch_rq(self) -> Dict[str, Any]: @@ -191,12 +228,12 @@ def _build_start_launch_rq(self) -> Dict[str, Any]: attributes = gen_attributes(rp_launch_attributes) if rp_launch_attributes else None start_rq = { - 'attributes': self._get_launch_attributes(attributes), - 'name': self._config.rp_launch, - 
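# A small usage sketch, not part of this patch: trim_docstring() above follows the
# PEP 257 trimming approach, dropping the common leading indentation and the
# surrounding blank lines, which is how multi-line test docstrings become readable
# item descriptions. The expected output below is an assumption based on that
# algorithm, since part of the function body is outside this hunk.
from pytest_reportportal.service import trim_docstring

doc = "\n    Summary line.\n        Indented detail.\n    "
assert trim_docstring(doc) == "Summary line.\n    Indented detail."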
'start_time': timestamp(), - 'description': self._config.rp_launch_description, - 'rerun': self._config.rp_rerun, - 'rerun_of': self._config.rp_rerun_of + "attributes": self._get_launch_attributes(attributes), + "name": self._config.rp_launch, + "start_time": timestamp(), + "description": self._config.rp_launch_description, + "rerun": self._config.rp_rerun, + "rerun_of": self._config.rp_rerun_of, } return start_rq @@ -208,9 +245,9 @@ def start_launch(self) -> Optional[str]: :return: item ID """ sl_pt = self._build_start_launch_rq() - LOGGER.debug('ReportPortal - Start launch: request_body=%s', sl_pt) + LOGGER.debug("ReportPortal - Start launch: request_body=%s", sl_pt) self._launch_id = self.rp.start_launch(**sl_pt) - LOGGER.debug('ReportPortal - Launch started: id=%s', self._launch_id) + LOGGER.debug("ReportPortal - Launch started: id=%s", self._launch_id) return self._launch_id def _get_item_dirs(self, item: Item) -> List[local]: @@ -241,8 +278,9 @@ def _get_tree_path(self, item: Item) -> List[Item]: path.reverse() return path - def _get_leaf(self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Optional[Item], - item_id: Optional[str] = None) -> Dict[str, Any]: + def _create_leaf( + self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Optional[Any], item_id: Optional[str] = None + ) -> Dict[str, Any]: """Construct a leaf for the itest tree. :param leaf_type: the leaf type @@ -251,9 +289,13 @@ def _get_leaf(self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Opti :return: a leaf """ return { - 'children': {}, 'type': leaf_type, 'item': item, - 'parent': parent_item, 'lock': threading.Lock(), - 'exec': ExecStatus.CREATED, 'item_id': item_id + "children": {}, + "type": leaf_type, + "item": item, + "parent": parent_item, + "lock": threading.Lock(), + "exec": ExecStatus.CREATED, + "item_id": item_id, } def _build_test_tree(self, session: Session) -> Dict[str, Any]: @@ -262,7 +304,7 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: :param session: pytest.Session object of the current execution :return: a tree of all tests and their suites """ - test_tree = self._get_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) + test_tree = self._create_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) for item in session.items: dir_path = self._get_item_dirs(item) @@ -270,7 +312,7 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: current_leaf = test_tree for i, leaf in enumerate(dir_path + class_path): - children_leafs = current_leaf['children'] + children_leafs = current_leaf["children"] leaf_type = LeafType.DIR if i == len(dir_path): @@ -279,88 +321,125 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: leaf_type = LeafType.CODE if leaf not in children_leafs: - children_leafs[leaf] = self._get_leaf(leaf_type, current_leaf, leaf) + children_leafs[leaf] = self._create_leaf(leaf_type, current_leaf, leaf) current_leaf = children_leafs[leaf] return test_tree def _remove_root_dirs(self, test_tree: Dict[str, Any], max_dir_level: int, dir_level: int = 0) -> None: - if test_tree['type'] == LeafType.ROOT: - items = list(test_tree['children'].items()) + if test_tree["type"] == LeafType.ROOT: + items = list(test_tree["children"].items()) for item, child_leaf in items: self._remove_root_dirs(child_leaf, max_dir_level, 1) return - if test_tree['type'] == LeafType.DIR and dir_level <= max_dir_level: + if test_tree["type"] == LeafType.DIR and dir_level <= max_dir_level: new_level = dir_level + 1 - parent_leaf = 
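# For reference, not part of this patch: the shape of a test-tree node ("leaf")
# produced by _create_leaf() above, reconstructed from the dict literal it returns.
# The placeholder values are illustrative.
import threading

example_leaf = {
    "children": {},            # child leaves keyed by the pytest/pytest-bdd object
    "type": None,              # a LeafType member: ROOT, DIR, FILE, CODE, SUITE or NESTED
    "item": None,              # the collected item (or BDD Feature/Rule/Scenario/Step)
    "parent": None,            # the parent leaf dict, None for the root
    "lock": threading.Lock(),  # guards concurrent start/finish of the same suite
    "exec": None,              # an ExecStatus member, CREATED until the item is started
    "item_id": None,           # the ReportPortal item UUID once the item is started
}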
test_tree['parent'] - current_item = test_tree['item'] - del parent_leaf['children'][current_item] - for item, child_leaf in test_tree['children'].items(): - parent_leaf['children'][item] = child_leaf - child_leaf['parent'] = parent_leaf + parent_leaf = test_tree["parent"] + current_item = test_tree["item"] + del parent_leaf["children"][current_item] + for item, child_leaf in test_tree["children"].items(): + parent_leaf["children"][item] = child_leaf + child_leaf["parent"] = parent_leaf self._remove_root_dirs(child_leaf, max_dir_level, new_level) def _remove_file_names(self, test_tree: Dict[str, Any]) -> None: - if test_tree['type'] != LeafType.FILE: - items = list(test_tree['children'].items()) + if test_tree["type"] != LeafType.FILE: + items = list(test_tree["children"].items()) for item, child_leaf in items: self._remove_file_names(child_leaf) return if not self._config.rp_hierarchy_test_file: - parent_leaf = test_tree['parent'] - current_item = test_tree['item'] - del parent_leaf['children'][current_item] - for item, child_leaf in test_tree['children'].items(): - parent_leaf['children'][item] = child_leaf - child_leaf['parent'] = parent_leaf + parent_leaf = test_tree["parent"] + current_item = test_tree["item"] + del parent_leaf["children"][current_item] + for item, child_leaf in test_tree["children"].items(): + parent_leaf["children"][item] = child_leaf + child_leaf["parent"] = parent_leaf self._remove_file_names(child_leaf) + def _get_scenario_template(self, scenario: Scenario) -> Optional[ScenarioTemplate]: + line_num = scenario.line_number + feature = scenario.feature + scenario_template = None + for template in feature.scenarios.values(): + if template.line_number == line_num: + scenario_template = template + break + if scenario_template and isinstance(scenario_template, ScenarioTemplate): + return scenario_template + def _generate_names(self, test_tree: Dict[str, Any]) -> None: - if test_tree['type'] == LeafType.ROOT: - test_tree['name'] = 'root' + if test_tree["type"] == LeafType.ROOT: + test_tree["name"] = "root" - if test_tree['type'] == LeafType.DIR: - test_tree['name'] = test_tree['item'].basename + if test_tree["type"] == LeafType.DIR: + test_tree["name"] = test_tree["item"].basename - if test_tree['type'] in {LeafType.CODE, LeafType.FILE}: - item = test_tree['item'] + if test_tree["type"] in {LeafType.CODE, LeafType.FILE}: + item = test_tree["item"] if isinstance(item, Module): - test_tree['name'] = os.path.split(str(item.fspath))[1] + test_tree["name"] = os.path.split(str(item.fspath))[1] + elif isinstance(item, Feature): + name = item.name if item.name else item.rel_filename + keyword = getattr(item, "keyword", "Feature") + test_tree["name"] = f"{keyword}: {name}" + elif isinstance(item, Scenario): + scenario_template = self._get_scenario_template(item) + if scenario_template and scenario_template.templated: + keyword = getattr(item, "keyword", "Scenario Outline") + else: + keyword = getattr(item, "keyword", "Scenario") + test_tree["name"] = f"{keyword}: {item.name}" + elif isinstance(item, Rule): + keyword = getattr(item, "keyword", "Rule") + test_tree["name"] = f"{keyword}: {item.name}" else: - test_tree['name'] = item.name + test_tree["name"] = item.name + + if test_tree["type"] == LeafType.SUITE: + item = test_tree["item"] + if isinstance(item, Rule): + keyword = getattr(item, "keyword", "Rule") + test_tree["name"] = f"{keyword}: {item.name}" - for item, child_leaf in test_tree['children'].items(): + for item, child_leaf in test_tree["children"].items(): 
self._generate_names(child_leaf) - def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separator: str): - child_items = list(test_tree['children'].items()) - if test_tree['type'] not in leaf_types: + def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separator: str) -> None: + child_items = list(test_tree["children"].items()) + if test_tree["type"] not in leaf_types: for item, child_leaf in child_items: self._merge_leaf_types(child_leaf, leaf_types, separator) - elif len(test_tree['children'].items()) > 0: - parent_leaf = test_tree['parent'] - current_item = test_tree['item'] - current_name = test_tree['name'] - del parent_leaf['children'][current_item] + elif len(child_items) > 0: + parent_leaf = test_tree["parent"] + current_item = test_tree["item"] + current_name = test_tree["name"] + child_types = [child_leaf["type"] in leaf_types for _, child_leaf in child_items] + if all(child_types): + del parent_leaf["children"][current_item] for item, child_leaf in child_items: - parent_leaf['children'][item] = child_leaf - child_leaf['parent'] = parent_leaf - child_leaf['name'] = current_name + separator + child_leaf['name'] + if all(child_types): + parent_leaf["children"][item] = child_leaf + child_leaf["parent"] = parent_leaf + child_leaf["name"] = current_name + separator + child_leaf["name"] self._merge_leaf_types(child_leaf, leaf_types, separator) def _merge_dirs(self, test_tree: Dict[str, Any]) -> None: - self._merge_leaf_types(test_tree, {LeafType.DIR}, self._config.rp_dir_path_separator) + self._merge_leaf_types(test_tree, {LeafType.DIR, LeafType.FILE}, self._config.rp_dir_path_separator) + + def _merge_code_with_separator(self, test_tree: Dict[str, Any], separator: str) -> None: + self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE, LeafType.DIR, LeafType.SUITE}, separator) def _merge_code(self, test_tree: Dict[str, Any]) -> None: - self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, '::') + self._merge_code_with_separator(test_tree, "::") def _build_item_paths(self, leaf: Dict[str, Any], path: List[Dict[str, Any]]) -> None: - if 'children' in leaf and len(leaf['children']) > 0: + if "children" in leaf and len(leaf["children"]) > 0: path.append(leaf) - for name, child_leaf in leaf['children'].items(): + for name, child_leaf in leaf["children"].items(): self._build_item_paths(child_leaf, path) path.pop() - elif leaf['type'] != LeafType.ROOT: - self._tree_path[leaf['item']] = path + [leaf] + if leaf["type"] != LeafType.ROOT: + self._tree_path[leaf["item"]] = path + [leaf] @check_rp_enabled def collect_tests(self, session: Session) -> None: @@ -386,12 +465,15 @@ def _truncate_item_name(self, name: str) -> str: :return: truncated to maximum length name if needed """ if len(name) > MAX_ITEM_NAME_LENGTH: - name = name[:MAX_ITEM_NAME_LENGTH - len(TRUNCATION_STR)] + TRUNCATION_STR - LOGGER.warning(PytestWarning( - f'Test leaf ID was truncated to "{name}" because of name size constrains on Report Portal')) + name = name[: MAX_ITEM_NAME_LENGTH - len(TRUNCATION_STR)] + TRUNCATION_STR + LOGGER.warning( + PytestWarning( + f'Test leaf ID was truncated to "{name}" because of name size constraints on Report Portal' + ) + ) return name - def _get_item_description(self, test_item): + def _get_item_description(self, test_item: Any) -> Optional[str]: """Get description of item.
:param test_item: pytest.Item @@ -404,6 +486,10 @@ def _get_item_description(self, test_item): return trim_docstring(doc) if isinstance(test_item, DoctestItem): return test_item.reportinfo()[2] + if isinstance(test_item, (Feature, Rule)): + description = test_item.description + if description: + return description.lstrip() # There is a bug in pytest-bdd that adds an extra space def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> Any: """ @@ -413,87 +499,123 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> :param func: a function to execute :return: the result of the function bypassed """ - if 'lock' in leaf: - with leaf['lock']: + if "lock" in leaf: + with leaf["lock"]: return func(leaf) return func(leaf) - def _build_start_suite_rq(self, leaf): - code_ref = str(leaf['item']) if leaf['type'] == LeafType.DIR else str(leaf['item'].fspath) - parent_item_id = self._lock(leaf['parent'], lambda p: p.get('item_id')) if 'parent' in leaf else None + def _process_bdd_attributes(self, item: Union[Feature, Scenario, Rule]) -> List[Dict[str, str]]: + tags = [] + tags.extend(item.tags) + if isinstance(item, Scenario): + test_attributes = self._config.rp_tests_attributes + tags.extend(test_attributes if test_attributes else []) + template = self._get_scenario_template(item) + if template and template.templated: + examples = [] + if isinstance(template.examples, list): + examples.extend(template.examples) + else: + examples.append(template.examples) + for example in examples: + tags.extend(getattr(example, "tags", [])) + return gen_attributes(tags) + + def _get_suite_code_ref(self, leaf: Dict[str, Any]) -> str: + item = leaf["item"] + if leaf["type"] == LeafType.DIR: + code_ref = str(item) + elif leaf["type"] == LeafType.FILE: + if isinstance(item, Feature): + code_ref = str(item.rel_filename) + else: + code_ref = str(item.fspath) + elif leaf["type"] == LeafType.SUITE: + code_ref = self._get_suite_code_ref(leaf["parent"]) + f"/[{type(item).__name__}:{item.name}]" + else: + code_ref = str(item.fspath) + return code_ref + + def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: + code_ref = self._get_suite_code_ref(leaf) + parent_item_id = self._lock(leaf["parent"], lambda p: p.get("item_id")) if "parent" in leaf else None + item = leaf["item"] payload = { - 'name': self._truncate_item_name(leaf['name']), - 'description': self._get_item_description(leaf['item']), - 'start_time': timestamp(), - 'item_type': 'SUITE', - 'code_ref': code_ref, - 'parent_item_id': parent_item_id + "name": self._truncate_item_name(leaf["name"]), + "description": self._get_item_description(item), + "start_time": timestamp(), + "item_type": "SUITE", + "code_ref": code_ref, + "parent_item_id": parent_item_id, } + if isinstance(item, (Feature, Scenario, Rule)): + payload["attributes"] = self._process_bdd_attributes(item) return payload - def _start_suite(self, suite_rq): - LOGGER.debug('ReportPortal - Start Suite: request_body=%s', suite_rq) + def _start_suite(self, suite_rq: Dict[str, Any]) -> Optional[str]: + LOGGER.debug("ReportPortal - Start Suite: request_body=%s", suite_rq) return self.rp.start_test_item(**suite_rq) - def _create_suite(self, leaf): - if leaf['exec'] != ExecStatus.CREATED: + def _create_suite(self, leaf: Dict[str, Any]) -> None: + if leaf["exec"] != ExecStatus.CREATED: return item_id = self._start_suite(self._build_start_suite_rq(leaf)) - leaf['item_id'] = item_id - leaf['exec'] = ExecStatus.IN_PROGRESS + leaf["item_id"] = item_id + 
leaf["exec"] = ExecStatus.IN_PROGRESS @check_rp_enabled - def _create_suite_path(self, item: Item): + def _create_suite_path(self, item: Any) -> None: path = self._tree_path[item] for leaf in path[1:-1]: - if leaf['exec'] != ExecStatus.CREATED: + if leaf["exec"] != ExecStatus.CREATED: continue self._lock(leaf, lambda p: self._create_suite(p)) def _get_item_name(self, mark) -> Optional[str]: - return mark.kwargs.get('name', mark.args[0] if mark.args else None) + return mark.kwargs.get("name", mark.args[0] if mark.args else None) - def _get_code_ref(self, item): + def _get_code_ref(self, item: Item) -> str: # Generate script path from work dir, use only backslashes to have the # same path on different systems and do not affect Test Case ID on # different systems - path = os.path.relpath(str(item.fspath), ROOT_DIR).replace('\\', '/') - method_name = item.originalname if hasattr(item, 'originalname') and item.originalname is not None \ + path = os.path.relpath(str(item.fspath), ROOT_DIR).replace("\\", "/") + method_name = ( + item.originalname + if hasattr(item, "originalname") and getattr(item, "originalname") is not None else item.name + ) parent = item.parent classes = [method_name] while not isinstance(parent, Module): - if not isinstance(parent, Instance) and hasattr(parent, 'name'): + if not isinstance(parent, Instance) and hasattr(parent, "name"): classes.append(parent.name) - if hasattr(parent, 'parent'): + if hasattr(parent, "parent"): parent = parent.parent else: break classes.reverse() - class_path = '.'.join(classes) - return '{0}:{1}'.format(path, class_path) + class_path = ".".join(classes) + return "{0}:{1}".format(path, class_path) - def _get_test_case_id(self, mark, leaf) -> str: - parameters = leaf.get('parameters', None) + def _get_test_case_id(self, mark, leaf: Dict[str, Any]) -> str: + parameters: Optional[Dict[str, Any]] = leaf.get("parameters", None) parameterized = True - selected_params = None + selected_params: Optional[List[str]] = None if mark is not None: - parameterized = mark.kwargs.get('parameterized', False) - selected_params = mark.kwargs.get('params', None) - if selected_params is not None and not isinstance(selected_params, - list): + parameterized = mark.kwargs.get("parameterized", False) + selected_params: Optional[Union[str, List[str]]] = mark.kwargs.get("params", None) + if selected_params is not None and not isinstance(selected_params, list): selected_params = [selected_params] param_str = None if parameterized and parameters is not None and len(parameters) > 0: if selected_params is not None and len(selected_params) > 0: - param_list = [str(parameters.get(param, None)) for param in - selected_params] + param_list = [str(parameters.get(param, None)) for param in selected_params] else: param_list = [str(param) for param in parameters.values()] - param_str = '[{}]'.format(','.join(sorted(param_list))) + param_str = "[{}]".format(",".join(sorted(param_list))) - basic_name_part = leaf['code_ref'] + basic_name_part = leaf["code_ref"] if mark is None: if param_str is None: return basic_name_part @@ -520,8 +642,7 @@ def _get_issue_urls(self, mark, default_url): if not issue_ids: return None mark_url = mark.kwargs.get("url", None) or default_url - return [mark_url.format(issue_id=issue_id) if mark_url else None - for issue_id in issue_ids] + return [mark_url.format(issue_id=issue_id) if mark_url else None for issue_id in issue_ids] def _get_issue_description_line(self, mark, default_url): issue_ids = self._get_issue_ids(mark) @@ -537,7 +658,7 @@ def 
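# Worked example, not part of this patch: for a hypothetical parametrized test like the
# one below located at tests/test_math.py, _get_code_ref() above yields
# "tests/test_math.py:TestOps.test_add" (the path is taken relative to the working
# directory and always uses forward slashes), and with no tc_id mark the default test
# case ID presumably appends the sorted stringified parameter values, i.e. "[2,3]".
import pytest

class TestOps:
    @pytest.mark.parametrize("a, b", [(2, 3)])
    def test_add(self, a, b):
        assert a + b == 5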
_get_issue_description_line(self, mark, default_url): issues += template.format(issue_id=issue_id, url=issue_url) return ISSUE_DESCRIPTION_LINE_TEMPLATE.format(reason, issues) - def _get_issue(self, mark) -> Issue: + def _get_issue(self, mark: Mark) -> Optional[Issue]: """Add issues description and issue_type to the test item. :param mark: pytest mark @@ -565,16 +686,20 @@ def _get_issue(self, mark) -> Issue: issue_urls = self._get_issue_urls(mark, default_url) for issue_id, issue_url in zip(issue_ids, issue_urls): issue.external_issue_add( - ExternalIssue(bts_url=self._config.rp_bts_url, bts_project=self._config.rp_bts_project, - ticket_id=issue_id, url=issue_url) + ExternalIssue( + bts_url=self._config.rp_bts_url, + bts_project=self._config.rp_bts_project, + ticket_id=issue_id, + url=issue_url, + ) ) return issue def _to_attribute(self, attribute_tuple): if attribute_tuple[0]: - return {'key': attribute_tuple[0], 'value': attribute_tuple[1]} + return {"key": attribute_tuple[0], "value": attribute_tuple[1]} else: - return {'value': attribute_tuple[1]} + return {"value": attribute_tuple[1]} def _process_item_name(self, leaf: Dict[str, Any]) -> str: """ @@ -583,9 +708,9 @@ def _process_item_name(self, leaf: Dict[str, Any]) -> str: :param leaf: item context :return: Item Name string """ - item = leaf['item'] - name = leaf['name'] - names = [m for m in item.iter_markers() if m.name == 'name'] + item = leaf["item"] + name = leaf["name"] + names = [m for m in item.iter_markers() if m.name == "name"] if len(names) > 0: mark_name = self._get_item_name(names[0]) if mark_name: @@ -599,52 +724,57 @@ def _get_parameters(self, item) -> Optional[Dict[str, Any]]: :param item: Pytest.Item :return: dict of params """ - params = item.callspec.params if hasattr(item, 'callspec') else None + params = item.callspec.params if hasattr(item, "callspec") else None if not params: return None - return {str(k): v.replace('\0', '\\0') if isinstance(v, str) else v for k, v in params.items()} + return {str(k): v.replace("\0", "\\0") if isinstance(v, str) else v for k, v in params.items()} - def _process_test_case_id(self, leaf): + def _process_test_case_id(self, leaf: Dict[str, Any]) -> str: """ Process Test Case ID if set. :param leaf: item context :return: Test Case ID string """ - tc_ids = [m for m in leaf['item'].iter_markers() if m.name == 'tc_id'] + tc_ids = [m for m in leaf["item"].iter_markers() if m.name == "tc_id"] if len(tc_ids) > 0: return self._get_test_case_id(tc_ids[0], leaf) return self._get_test_case_id(None, leaf) - def _process_issue(self, item) -> Issue: + def _process_issue(self, item: Item) -> Optional[Issue]: """ Process Issue if set. :param item: Pytest.Item :return: Issue """ - issues = [m for m in item.iter_markers() if m.name == 'issue'] + issues = [m for m in item.iter_markers() if m.name == "issue"] if len(issues) > 0: return self._get_issue(issues[0]) - def _process_attributes(self, item): + def _process_attributes(self, item: Item) -> List[Dict[str, Any]]: """ Process attributes of item. 
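# Hypothetical usage sketch, not part of this patch, of the marks consumed above: the
# "name" mark and the "tc_id" mark with its "parameterized"/"params" kwargs match
# _get_item_name()/_get_test_case_id(), and the "url" kwarg of the "issue" mark matches
# _get_issue_urls(). The "issue_id" and "reason" kwarg names are assumptions, since
# _get_issue_ids() is not shown in this hunk.
import pytest

@pytest.mark.name("Readable item name")
@pytest.mark.tc_id("TC-42", parameterized=True, params=["value"])
@pytest.mark.issue(issue_id="ISSUE-1234", reason="Known defect",  # assumed kwargs
                   url="https://bts.example.com/{issue_id}")
@pytest.mark.parametrize("value", [1])
def test_marked(value):
    assert value == 1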
:param item: Pytest.Item :return: a set of attributes """ - attributes = set() + test_attributes = self._config.rp_tests_attributes + if test_attributes: + attributes = { + (attr.get("key", None), attr["value"]) for attr in gen_attributes(self._config.rp_tests_attributes) + } + else: + attributes = set() for marker in item.iter_markers(): - if marker.name == 'issue': + if marker.name == "issue": if self._config.rp_issue_id_marks: for issue_id in self._get_issue_ids(marker): attributes.add((marker.name, issue_id)) continue - if marker.name == 'name': + if marker.name == "name": continue - if marker.name in self._config.rp_ignore_attributes \ - or marker.name in PYTEST_MARKS_IGNORE: + if marker.name in self._config.rp_ignore_attributes or marker.name in PYTEST_MARKS_IGNORE: continue if len(marker.args) > 0: attributes.add((marker.name, str(marker.args[0]))) @@ -659,13 +789,14 @@ def _process_metadata_item_start(self, leaf: Dict[str, Any]) -> None: :param leaf: item context """ - item = leaf['item'] - leaf['name'] = self._process_item_name(leaf) - leaf['parameters'] = self._get_parameters(item) - leaf['code_ref'] = self._get_code_ref(item) - leaf['test_case_id'] = self._process_test_case_id(leaf) - leaf['issue'] = self._process_issue(item) - leaf['attributes'] = self._process_attributes(item) + item = leaf["item"] + leaf["name"] = self._process_item_name(leaf) + leaf["description"] = self._get_item_description(item) + leaf["parameters"] = self._get_parameters(item) + leaf["code_ref"] = self._get_code_ref(item) + leaf["test_case_id"] = self._process_test_case_id(leaf) + leaf["issue"] = self._process_issue(item) + leaf["attributes"] = self._process_attributes(item) def _process_metadata_item_finish(self, leaf: Dict[str, Any]) -> None: """ @@ -673,32 +804,32 @@ def _process_metadata_item_finish(self, leaf: Dict[str, Any]) -> None: :param leaf: item context """ - item = leaf['item'] - leaf['attributes'] = self._process_attributes(item) - leaf['issue'] = self._process_issue(item) + item = leaf["item"] + leaf["attributes"] = self._process_attributes(item) + leaf["issue"] = self._process_issue(item) def _build_start_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: payload = { - 'attributes': leaf.get('attributes', None), - 'name': self._truncate_item_name(leaf['name']), - 'description': self._get_item_description(leaf['item']), - 'start_time': timestamp(), - 'item_type': 'STEP', - 'code_ref': leaf.get('code_ref', None), - 'parameters': leaf.get('parameters', None), - 'parent_item_id': self._lock(leaf['parent'], lambda p: p['item_id']), - 'test_case_id': leaf.get('test_case_id', None) + "attributes": leaf.get("attributes", None), + "name": self._truncate_item_name(leaf["name"]), + "description": leaf["description"], + "start_time": timestamp(), + "item_type": "STEP", + "code_ref": leaf.get("code_ref", None), + "parameters": leaf.get("parameters", None), + "parent_item_id": self._lock(leaf["parent"], lambda p: p["item_id"]), + "test_case_id": leaf.get("test_case_id", None), } return payload - def _start_step(self, step_rq): - LOGGER.debug('ReportPortal - Start TestItem: request_body=%s', step_rq) + def _start_step(self, step_rq: Dict[str, Any]) -> Optional[str]: + LOGGER.debug("ReportPortal - Start TestItem: request_body=%s", step_rq) return self.rp.start_test_item(**step_rq) - def __unique_id(self): - return str(os.getpid()) + '-' + str(threading.current_thread().ident) + def __unique_id(self) -> str: + return str(os.getpid()) + "-" + str(threading.current_thread().ident) - def __started(self): 
+ def __started(self) -> bool: return self.__unique_id() in self._start_tracker @check_rp_enabled @@ -707,7 +838,7 @@ def start_pytest_item(self, test_item: Optional[Item] = None): Start pytest_item. :param test_item: pytest.Item - :return: item ID + :return: None """ if test_item is None: return @@ -715,14 +846,18 @@ def start_pytest_item(self, test_item: Optional[Item] = None): if not self.__started(): self.start() + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + self._bdd_item_by_name[test_item.name] = test_item + return + self._create_suite_path(test_item) current_leaf = self._tree_path[test_item][-1] self._process_metadata_item_start(current_leaf) item_id = self._start_step(self._build_start_step_rq(current_leaf)) - current_leaf['item_id'] = item_id - current_leaf['exec'] = ExecStatus.IN_PROGRESS + current_leaf["item_id"] = item_id + current_leaf["exec"] = ExecStatus.IN_PROGRESS - def process_results(self, test_item, report): + def process_results(self, test_item: Item, report): """ Save test item results after execution. @@ -730,96 +865,103 @@ def process_results(self, test_item, report): :param report: pytest's result report """ if report.longrepr: - self.post_log(test_item, report.longreprtext, log_level='ERROR') + self.post_log(test_item, report.longreprtext, log_level="ERROR") + + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + return leaf = self._tree_path[test_item][-1] # Defining test result - if report.when == 'setup': - leaf['status'] = 'PASSED' + if report.when == "setup": + leaf["status"] = "PASSED" if report.failed: - leaf['status'] = 'FAILED' + leaf["status"] = "FAILED" return if report.skipped: - if leaf['status'] in (None, 'PASSED'): - leaf['status'] = 'SKIPPED' + if leaf["status"] in (None, "PASSED"): + leaf["status"] = "SKIPPED" - def _build_finish_step_rq(self, leaf): - issue = leaf.get('issue', None) - status = leaf['status'] - if status == 'SKIPPED' and not self._config.rp_is_skipped_an_issue: + def _build_finish_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: + issue = leaf.get("issue", None) + status = leaf.get("status", "PASSED") + if status == "SKIPPED" and not self._config.rp_is_skipped_an_issue: issue = NOT_ISSUE - if status == 'PASSED': + if status == "PASSED": issue = None payload = { - 'attributes': leaf.get('attributes', None), - 'end_time': timestamp(), - 'status': status, - 'issue': issue, - 'item_id': leaf['item_id'] + "attributes": leaf.get("attributes", None), + "end_time": timestamp(), + "status": status, + "issue": issue, + "item_id": leaf["item_id"], } return payload - def _finish_step(self, finish_rq): - LOGGER.debug('ReportPortal - Finish TestItem: request_body=%s', finish_rq) + def _finish_step(self, finish_rq: Dict[str, Any]) -> None: + LOGGER.debug("ReportPortal - Finish TestItem: request_body=%s", finish_rq) self.rp.finish_test_item(**finish_rq) - def _finish_suite(self, finish_rq): - LOGGER.debug('ReportPortal - End TestSuite: request_body=%s', finish_rq) + def _finish_suite(self, finish_rq: Dict[str, Any]) -> None: + LOGGER.debug("ReportPortal - End TestSuite: request_body=%s", finish_rq) self.rp.finish_test_item(**finish_rq) - def _build_finish_suite_rq(self, leaf): - payload = { - 'end_time': timestamp(), - 'item_id': leaf['item_id'] - } + def _build_finish_suite_rq(self, leaf) -> Dict[str, Any]: + payload = {"end_time": timestamp(), "item_id": leaf["item_id"]} return payload - def _proceed_suite_finish(self, leaf): - if leaf.get('exec', ExecStatus.FINISHED) == 
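# Hedged example, not part of this patch: per _build_finish_step_rq() above, a skipped
# test is finished with status SKIPPED, and when the rp_is_skipped_an_issue option is
# set to False the NOT_ISSUE marker is attached, so the skip is not flagged for
# investigation on the ReportPortal side.
import pytest

@pytest.mark.skip(reason="not relevant on this platform")
def test_skipped_without_investigation():
    raise AssertionError("never executed")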
ExecStatus.FINISHED: + def _proceed_suite_finish(self, leaf) -> None: + if leaf.get("exec", ExecStatus.FINISHED) == ExecStatus.FINISHED: return self._finish_suite(self._build_finish_suite_rq(leaf)) - leaf['exec'] = ExecStatus.FINISHED - - def _finish_parents(self, leaf): - if 'parent' not in leaf or leaf['parent'] is None \ - or leaf['parent']['type'] is LeafType.ROOT \ - or leaf['parent'].get('exec', ExecStatus.FINISHED) == \ - ExecStatus.FINISHED: + leaf["exec"] = ExecStatus.FINISHED + + def _finish_parents(self, leaf: Dict[str, Any]) -> None: + if ( + "parent" not in leaf + or leaf["parent"] is None + or leaf["parent"]["type"] is LeafType.ROOT + or leaf["parent"].get("exec", ExecStatus.FINISHED) == ExecStatus.FINISHED + ): return - for item, child_leaf in leaf['parent']['children'].items(): - current_status = child_leaf['exec'] + for item, child_leaf in leaf["parent"]["children"].items(): + current_status = child_leaf["exec"] if current_status != ExecStatus.FINISHED: - current_status = self._lock(child_leaf, lambda p: p['exec']) + current_status = self._lock(child_leaf, lambda p: p["exec"]) if current_status != ExecStatus.FINISHED: return - self._lock(leaf['parent'], lambda p: self._proceed_suite_finish(p)) - self._finish_parents(leaf['parent']) + self._lock(leaf["parent"], lambda p: self._proceed_suite_finish(p)) + self._finish_parents(leaf["parent"]) @check_rp_enabled - def finish_pytest_item(self, test_item): - """ - Finish pytest_item. + def finish_pytest_item(self, test_item: Optional[Item] = None) -> None: + """Finish pytest_item. :param test_item: pytest.Item :return: None """ - path = self._tree_path[test_item] - leaf = path[-1] + if test_item is None: + return + + leaf = self._tree_path[test_item][-1] self._process_metadata_item_finish(leaf) + + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + del self._bdd_item_by_name[test_item.name] + return + self._finish_step(self._build_finish_step_rq(leaf)) - leaf['exec'] = ExecStatus.FINISHED + leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) - def _get_items(self, exec_status): - return [k for k, v in self._tree_path.items() if - v[-1]['exec'] == exec_status] + def _get_items(self, exec_status) -> List[Item]: + return [k for k, v in self._tree_path.items() if v[-1]["exec"] == exec_status] - def finish_suites(self): + def finish_suites(self) -> None: """ Finish all suites in run with status calculations. 
@@ -830,50 +972,49 @@ def finish_suites(self): """ # Ensure there is no running items finish_time = time() - while len(self._get_items(ExecStatus.IN_PROGRESS)) > 0 \ - and time() - finish_time <= self._config.rp_launch_timeout: + while ( + len(self._get_items(ExecStatus.IN_PROGRESS)) > 0 and time() - finish_time <= self._config.rp_launch_timeout + ): sleep(0.1) skipped_items = self._get_items(ExecStatus.CREATED) for item in skipped_items: path = list(self._tree_path[item]) path.reverse() for leaf in path[1:-1]: - if leaf['exec'] == ExecStatus.IN_PROGRESS: + if leaf["exec"] == ExecStatus.IN_PROGRESS: self._lock(leaf, lambda p: self._proceed_suite_finish(p)) - def _build_finish_launch_rq(self): - finish_rq = { - 'end_time': timestamp() - } + def _build_finish_launch_rq(self) -> Dict[str, Any]: + finish_rq = {"end_time": timestamp()} return finish_rq - def _finish_launch(self, finish_rq): - LOGGER.debug('ReportPortal - Finish launch: request_body=%s', finish_rq) + def _finish_launch(self, finish_rq) -> None: + LOGGER.debug("ReportPortal - Finish launch: request_body=%s", finish_rq) self.rp.finish_launch(**finish_rq) @check_rp_enabled - def finish_launch(self): - """ - Finish tests launch. - - :return: None - """ + def finish_launch(self) -> None: + """Finish test launch.""" # To finish launch session str parameter is needed self._finish_launch(self._build_finish_launch_rq()) - def _build_log(self, item_id: str, message: str, log_level: str, attachment: Optional[Any] = None): + def _build_log( + self, item_id: str, message: str, log_level: str, attachment: Optional[Any] = None + ) -> Dict[str, Any]: sl_rq = { - 'item_id': item_id, - 'time': timestamp(), - 'message': message, - 'level': log_level, + "item_id": item_id, + "time": timestamp(), + "message": message, + "level": log_level, } if attachment: - sl_rq['attachment'] = attachment + sl_rq["attachment"] = attachment return sl_rq @check_rp_enabled - def post_log(self, test_item, message: str, log_level: str = 'INFO', attachment: Optional[Any] = None): + def post_log( + self, test_item: Item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None + ) -> None: """ Send a log message to the Report Portal. @@ -884,15 +1025,23 @@ def post_log(self, test_item, message: str, log_level: str = 'INFO', attachment: :param attachment: attachment file :return: None """ - if log_level not in self._log_levels: - LOGGER.warning('Incorrect loglevel = %s. Force set to INFO. ' - 'Available levels: %s.', log_level, self._log_levels) - item_id = self._tree_path[test_item][-1]['item_id'] + if log_level not in KNOWN_LOG_LEVELS: + LOGGER.warning( + "Incorrect loglevel = %s. Force set to INFO. " "Available levels: %s.", log_level, KNOWN_LOG_LEVELS + ) + item_id = self._tree_path[test_item][-1]["item_id"] + if PYTEST_BDD: + if not item_id: + # Check if we are actually a BDD scenario + scenario = self._bdd_scenario_by_item[test_item] + if scenario: + # Yes, we are a BDD scenario, report log to the scenario + item_id = self._tree_path[scenario][-1]["item_id"] sl_rq = self._build_log(item_id, message, log_level, attachment) self.rp.log(**sl_rq) - def report_fixture(self, name: str, error_msg: str) -> None: + def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None]: """Report fixture setup and teardown. 
:param name: Name of the fixture @@ -907,29 +1056,302 @@ def report_fixture(self, name: str, error_msg: str) -> None: try: outcome = yield - exception = outcome.exception - status = 'PASSED' + exc_info = outcome.excinfo + exception = exc_info[1] if exc_info else None + status = "PASSED" if exception: - if type(exception).__name__ != 'Skipped': - status = 'FAILED' - self.post_log(name, error_msg, log_level='ERROR') + if type(exception).__name__ != "Skipped": + status = "FAILED" + error_log = self._build_log(item_id, error_msg, log_level="ERROR") + self.rp.log(**error_log) + traceback_str = "\n".join( + traceback.format_exception(outcome.excinfo[0], value=exception, tb=exc_info[2]) + ) + exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") + self.rp.log(**exception_log) reporter.finish_nested_step(item_id, timestamp(), status) except Exception as e: - LOGGER.error('Failed to report fixture: %s', name) + LOGGER.error("Failed to report fixture: %s", name) LOGGER.exception(e) - reporter.finish_nested_step(item_id, timestamp(), 'FAILED') + reporter.finish_nested_step(item_id, timestamp(), "FAILED") + + def _get_python_name(self, scenario: Scenario) -> str: + python_name = f"test_{make_python_name(self._get_scenario_template(scenario).name)}" + same_item_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] + if len(same_item_names) < 1: + return python_name + else: + return same_item_names[-1] # Should work fine, since we use OrderedDict + + def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: + """Save BDD scenario and Feature to test tree. The scenario will be started later if a step will be reported. + + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + """ + if not PYTEST_BDD: + return + item_name = self._get_python_name(scenario) + test_item = self._bdd_item_by_name.get(item_name, None) + self._bdd_scenario_by_item[test_item] = scenario + self._bdd_item_by_scenario[scenario] = test_item + + root_leaf = self._bdd_tree + if not root_leaf: + self._bdd_tree = root_leaf = self._create_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) + children_leafs = root_leaf["children"] + if feature in children_leafs: + feature_leaf = children_leafs[feature] + else: + feature_leaf = self._create_leaf(LeafType.FILE, root_leaf, feature) + children_leafs[feature] = feature_leaf + children_leafs = feature_leaf["children"] + rule = getattr(scenario, "rule", None) + if rule: + if rule in children_leafs: + rule_leaf = children_leafs[rule] + else: + rule_leaf = self._create_leaf(LeafType.SUITE, feature_leaf, rule) + children_leafs[rule] = rule_leaf + else: + rule_leaf = feature_leaf + children_leafs = rule_leaf["children"] + scenario_leaf = self._create_leaf(LeafType.CODE, rule_leaf, scenario) + children_leafs[scenario] = scenario_leaf + children_leafs = scenario_leaf["children"] + background = feature.background + if background: + if background not in children_leafs: + background_leaf = self._create_leaf(LeafType.NESTED, rule_leaf, background) + children_leafs[background] = background_leaf + + self._remove_file_names(root_leaf) + self._generate_names(root_leaf) + if not self._config.rp_hierarchy_code: + try: + self._merge_code_with_separator(root_leaf, " - ") + except Exception as e: + LOGGER.exception(e) + self._build_item_paths(root_leaf, []) + + def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: + """Finish BDD scenario. Skip if it was not started. 
+ + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + """ + if not PYTEST_BDD: + return + + leaf = self._tree_path[scenario][-1] + if leaf["exec"] != ExecStatus.IN_PROGRESS: + return + self._finish_step(self._build_finish_step_rq(leaf)) + leaf["exec"] = ExecStatus.FINISHED + self._finish_parents(leaf) + + def _get_scenario_parameters_from_template(self, scenario: Scenario) -> Optional[Dict[str, str]]: + """Get scenario parameters from its template by comparing steps. + + :param scenario: The scenario instance + :return: A dictionary with parameter names and values, or None if no parameters found + """ + item = self._bdd_item_by_scenario.get(scenario, None) + if not item: + return None + item_params = item.callspec.params if hasattr(item, "callspec") else None + if not item_params: + return None + if "_pytest_bdd_example" in item_params: + return OrderedDict(item_params["_pytest_bdd_example"]) + return None + + def _get_scenario_code_ref(self, scenario: Scenario, scenario_template: Optional[ScenarioTemplate]) -> str: + code_ref = scenario.feature.rel_filename + "/" + rule = getattr(scenario, "rule", None) + if rule: + code_ref += f"[RULE:{rule.name}]/" + if scenario_template and scenario_template.templated and scenario_template.examples: + parameters = self._get_scenario_parameters_from_template(scenario) + if parameters: + parameters_str = ";".join([f"{k}:{v}" for k, v in sorted(parameters.items())]) + parameters_str = f"[{parameters_str}]" if parameters_str else "" + else: + parameters_str = "" + code_ref += f"[EXAMPLE:{scenario.name}{parameters_str}]" + else: + keyword = getattr(scenario, "keyword", "Scenario").upper() + code_ref += f"[{keyword}:{scenario.name}]" + + return code_ref + + def _get_scenario_test_case_id(self, leaf: Dict[str, Any]) -> str: + attributes = leaf.get("attributes", []) + params: Optional[Dict[str, str]] = leaf.get("parameters", None) + for attribute in attributes: + if attribute.get("key", None) == "tc_id": + tc_id = attribute["value"] + params_str = "" + if params: + params_str = ";".join([f"{k}:{v}" for k, v in sorted(params.items())]) + params_str = f"[{params_str}]" + return f"{tc_id}{params_str}" + return leaf["code_ref"] + + def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: + """ + Process all types of scenario metadata for its start event. 
+ + :param leaf: item context + """ + scenario = leaf["item"] + description = ( + "\n".join(scenario.description) if isinstance(scenario.description, list) else scenario.description + ).rstrip("\n") + leaf["description"] = description if description else None + scenario_template = self._get_scenario_template(scenario) + if scenario_template and scenario_template.templated: + parameters = self._get_scenario_parameters_from_template(scenario) + leaf["parameters"] = parameters + if parameters: + parameters_str = f"Parameters:\n\n{markdown_helpers.format_data_table_dict(parameters)}" + if leaf["description"]: + leaf["description"] = markdown_helpers.as_two_parts(leaf["description"], parameters_str) + else: + leaf["description"] = parameters_str + leaf["code_ref"] = self._get_scenario_code_ref(scenario, scenario_template) + leaf["attributes"] = self._process_bdd_attributes(scenario) + leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) + + def _finish_bdd_step(self, leaf: Dict[str, Any], status: str) -> None: + if leaf["exec"] != ExecStatus.IN_PROGRESS: + return + + reporter = self.rp.step_reporter + item_id = leaf["item_id"] + reporter.finish_nested_step(item_id, timestamp(), status) + leaf["exec"] = ExecStatus.FINISHED + + def _is_background_step(self, step: Step, feature: Feature) -> bool: + """Check if step belongs to feature background. + + :param step: Current step + :param feature: Current feature + :return: True if step is from background, False otherwise + """ + if not feature.background: + return False + + background_steps = feature.background.steps + return any( + s.name == step.name and s.keyword == step.keyword and s.line_number == step.line_number + for s in background_steps + ) + + @check_rp_enabled + def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: + """Start BDD step. + + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + :param step: pytest_bdd.Step + """ + if not PYTEST_BDD: + return + + self._create_suite_path(scenario) + scenario_leaf = self._tree_path[scenario][-1] + if scenario_leaf["exec"] != ExecStatus.IN_PROGRESS: + self._process_scenario_metadata(scenario_leaf) + scenario_leaf["item_id"] = self._start_step(self._build_start_step_rq(scenario_leaf)) + scenario_leaf["exec"] = ExecStatus.IN_PROGRESS + reporter = self.rp.step_reporter + step_leaf = self._create_leaf(LeafType.NESTED, scenario_leaf, step) + if self._is_background_step(step, feature): + background_leaf = scenario_leaf["children"][feature.background] + background_leaf["children"][step] = step_leaf + if background_leaf["exec"] != ExecStatus.IN_PROGRESS: + item_id = reporter.start_nested_step(BACKGROUND_STEP_NAME, timestamp()) + background_leaf["item_id"] = item_id + background_leaf["exec"] = ExecStatus.IN_PROGRESS + else: + scenario_leaf["children"][step] = step_leaf + if feature.background: + background_leaf = scenario_leaf["children"][feature.background] + self._finish_bdd_step(background_leaf, "PASSED") + item_id = reporter.start_nested_step(f"{step.keyword} {step.name}", timestamp()) + step_leaf["item_id"] = item_id + step_leaf["exec"] = ExecStatus.IN_PROGRESS + + @check_rp_enabled + def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: + """Finish BDD step. 
+ + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + :param step: pytest_bdd.Step + """ + if not PYTEST_BDD: + return + + scenario_leaf = self._tree_path[scenario][-1] + background_steps = [] + if feature.background: + background_steps = feature.background.steps + if next( + filter( + lambda s: s.name == step.name and s.keyword == step.keyword and s.line_number == step.line_number, + background_steps, + ), + None, + ): + parent_leaf = scenario_leaf["children"][feature.background] + else: + parent_leaf = scenario_leaf + step_leaf = parent_leaf["children"][step] + self._finish_bdd_step(step_leaf, "PASSED") + + @check_rp_enabled + def finish_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception) -> None: + """Report BDD step error. + + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + :param step: pytest_bdd.Step + :param exception: Exception + """ + if not PYTEST_BDD: + return + + scenario_leaf = self._tree_path[scenario][-1] + scenario_leaf["status"] = "FAILED" + if step.background: + step_leaf = scenario_leaf["children"][step.background]["children"][step] + else: + step_leaf = scenario_leaf["children"][step] + item_id = step_leaf["item_id"] + traceback_str = "\n".join( + traceback.format_exception(type(exception), value=exception, tb=exception.__traceback__) + ) + exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") + client = self.rp.step_reporter.client + client.log(**exception_log) + + self._finish_bdd_step(step_leaf, "FAILED") + if step.background: + background_leaf = scenario_leaf["children"][step.background] + self._finish_bdd_step(background_leaf, "FAILED") def start(self) -> None: """Start servicing Report Portal requests.""" self.parent_item_id = self._config.rp_parent_item_id - self.ignored_attributes = list( - set( - self._config.rp_ignore_attributes or [] - ).union({'parametrize'}) + self.ignored_attributes = list(set(self._config.rp_ignore_attributes or []).union({"parametrize"})) + LOGGER.debug( + "ReportPortal - Init service: endpoint=%s, " "project=%s, api_key=%s", + self._config.rp_endpoint, + self._config.rp_project, + self._config.rp_api_key, ) - LOGGER.debug('ReportPortal - Init service: endpoint=%s, ' - 'project=%s, api_key=%s', self._config.rp_endpoint, - self._config.rp_project, self._config.rp_api_key) launch_id = self._launch_id if self._config.rp_launch_id: launch_id = self._config.rp_launch_id @@ -947,14 +1369,14 @@ def start(self) -> None: launch_uuid_print=self._config.rp_launch_uuid_print, print_output=self._config.rp_launch_uuid_print_output, http_timeout=self._config.rp_http_timeout, - mode=self._config.rp_mode + mode=self._config.rp_mode, ) if hasattr(self.rp, "get_project_settings"): self.project_settings = self.rp.get_project_settings() # noinspection PyUnresolvedReferences self._start_tracker.add(self.__unique_id()) - def stop(self): + def stop(self) -> None: """Finish servicing Report Portal requests.""" self.rp.close() self.rp = None diff --git a/requirements-dev-bdd.txt b/requirements-dev-bdd.txt new file mode 100644 index 0000000..472e2bd --- /dev/null +++ b/requirements-dev-bdd.txt @@ -0,0 +1 @@ +pytest-bdd>=7.2.0 diff --git a/requirements.txt b/requirements.txt index 616a738..ea8ffec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ dill>=0.3.6 -pytest>=3.8.0 -reportportal-client~=5.5.10 +pytest>=4.6.10 +reportportal-client~=5.6.2 aenum>=3.1.0 diff --git a/setup.py b/setup.py index b5dc3ab..f5758a9 100644 --- 
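# Illustrative pytest-bdd usage, not part of this patch, that exercises the BDD
# reporting path above: each scenario from the feature file is collected as a
# "test_<scenario name>" item (see _get_python_name()/make_python_name()), and every
# Given/When/Then call is reported as a nested step, with Background steps grouped
# under a "Background" nested step. The feature file path and step texts are made up.
from pytest_bdd import given, scenarios, then, when

scenarios("features/login.feature")  # hypothetical feature file

@given("a registered user", target_fixture="user")
def user():
    return {"name": "user"}

@when("the user logs in")
def log_in(user):
    user["logged_in"] = True

@then("the session is active")
def session_active(user):
    assert user["logged_in"]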
a/setup.py +++ b/setup.py @@ -17,8 +17,7 @@ from setuptools import setup - -__version__ = '5.4.7' +__version__ = "5.5.0" def read_file(fname): @@ -32,32 +31,31 @@ def read_file(fname): setup( - name='pytest-reportportal', + name="pytest-reportportal", version=__version__, - description='Agent for Reporting results of tests to the Report Portal', - long_description=read_file('README.rst'), - long_description_content_type='text/x-rst', - author='Report Portal Team', - author_email='support@reportportal.io', - url='https://github.com/reportportal/agent-python-pytest', - packages=['pytest_reportportal'], - package_data={'pytest_reportportal': ['*.pyi']}, - install_requires=read_file('requirements.txt').splitlines(), - license='Apache 2.0', - keywords=['testing', 'reporting', 'reportportal', 'pytest', 'agent'], + description="Agent for Reporting results of tests to the Report Portal", + long_description=read_file("README.md"), + long_description_content_type="text/markdown", + author="Report Portal Team", + author_email="support@reportportal.io", + url="https://github.com/reportportal/agent-python-pytest", + packages=["pytest_reportportal"], + package_data={"pytest_reportportal": ["*.pyi"]}, + install_requires=read_file("requirements.txt").splitlines(), + license="Apache 2.0", + keywords=["testing", "reporting", "reportportal", "pytest", "agent"], classifiers=[ - 'Framework :: Pytest', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Programming Language :: Python :: 3.12', - 'Programming Language :: Python :: 3.13' - ], + "Framework :: Pytest", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + ], entry_points={ - 'pytest11': [ - 'pytest_reportportal = pytest_reportportal.plugin', + "pytest11": [ + "pytest_reportportal = pytest_reportportal.plugin", ] - } + }, ) diff --git a/tests/__init__.py b/tests/__init__.py index f7f99a7..5ada03e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -13,5 +13,5 @@ """This package contains tests for the project.""" -REPORT_PORTAL_SERVICE = 'reportportal_client.RPClient' -REQUESTS_SERVICE = 'reportportal_client.client.requests.Session' +REPORT_PORTAL_SERVICE = "reportportal_client.RPClient" +REQUESTS_SERVICE = "reportportal_client.client.requests.Session" diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py index 5046575..babb799 100644 --- a/tests/helpers/utils.py +++ b/tests/helpers/utils.py @@ -21,67 +21,67 @@ import pytest DEFAULT_VARIABLES = { - 'rp_launch': 'Pytest', - 'rp_endpoint': 'http://localhost:8080', - 'rp_project': 'default_personal', - 'rp_api_key': 'test_api_key', - 'rp_skip_connection_test': 'True' + "rp_launch": "Pytest", + "rp_endpoint": "http://localhost:8080", + "rp_project": "default_personal", + "rp_api_key": "test_api_key", + "rp_skip_connection_test": "True", } DEFAULT_PROJECT_SETTINGS = { - 'project': 2, - 'subTypes': { - 'NO_DEFECT': [ + "project": 2, + "subTypes": { + "NO_DEFECT": [ { - 'id': 4, - 'locator': 'nd001', - 'typeRef': 'NO_DEFECT', - 'longName': 'No Defect', - 'shortName': 'ND', - 'color': "#777777" + "id": 4, + "locator": "nd001", + "typeRef": "NO_DEFECT", + "longName": "No Defect", + "shortName": "ND", + "color": "#777777", } 
], - 'TO_INVESTIGATE': [ + "TO_INVESTIGATE": [ { - 'id': 1, - 'locator': 'ti001', - 'typeRef': 'TO_INVESTIGATE', - 'longName': 'To Investigate', - 'shortName': 'TI', - 'color': '#ffb743' + "id": 1, + "locator": "ti001", + "typeRef": "TO_INVESTIGATE", + "longName": "To Investigate", + "shortName": "TI", + "color": "#ffb743", } ], - 'AUTOMATION_BUG': [ + "AUTOMATION_BUG": [ { - 'id': 2, - 'locator': 'ab001', - 'typeRef': 'AUTOMATION_BUG', - 'longName': 'Automation Bug', - 'shortName': 'AB', - 'color': '#f7d63e' + "id": 2, + "locator": "ab001", + "typeRef": "AUTOMATION_BUG", + "longName": "Automation Bug", + "shortName": "AB", + "color": "#f7d63e", } ], - 'PRODUCT_BUG': [ + "PRODUCT_BUG": [ { - 'id': 3, - 'locator': 'pb001', - 'typeRef': 'PRODUCT_BUG', - 'longName': 'Product Bug', - 'shortName': 'PB', - 'color': '#ec3900' + "id": 3, + "locator": "pb001", + "typeRef": "PRODUCT_BUG", + "longName": "Product Bug", + "shortName": "PB", + "color": "#ec3900", } ], - 'SYSTEM_ISSUE': [ + "SYSTEM_ISSUE": [ { - 'id': 5, - 'locator': 'si001', - 'typeRef': 'SYSTEM_ISSUE', - 'longName': 'System Issue', - 'shortName': 'SI', - 'color': '#0274d1' + "id": 5, + "locator": "si001", + "typeRef": "SYSTEM_ISSUE", + "longName": "System Issue", + "shortName": "SI", + "color": "#0274d1", } - ] - } + ], + }, } @@ -98,27 +98,26 @@ def run_pytest_tests(tests, args=None, variables=None): if variables is None: variables = DEFAULT_VARIABLES - arguments = ['--reportportal'] + args + arguments = ["--reportportal"] + args for k, v in variables.items(): - arguments.append('-o') - arguments.append('{0}={1}'.format(k, str(v))) + arguments.append("-o") + arguments.append("{0}={1}".format(k, str(v))) if tests is not None: for t in tests: arguments.append(t) # Workaround collisions with parent test - current_test = os.environ['PYTEST_CURRENT_TEST'] - del os.environ['PYTEST_CURRENT_TEST'] + current_test = os.environ["PYTEST_CURRENT_TEST"] + del os.environ["PYTEST_CURRENT_TEST"] result = pytest.main(arguments) - os.environ['PYTEST_CURRENT_TEST'] = current_test + os.environ["PYTEST_CURRENT_TEST"] = current_test return result def item_id_gen(**kwargs): - return "{}-{}-{}".format(kwargs['name'], str(round(time.time() * 1000)), - random.randint(0, 9999)) + return "{}-{}-{}".format(kwargs["name"], str(round(time.time() * 1000)), random.randint(0, 9999)) def project_settings(**kwargs): @@ -128,10 +127,10 @@ def project_settings(**kwargs): def attributes_to_tuples(attributes): result = set() for attribute in attributes: - if 'key' in attribute: - result.add((attribute['key'], attribute['value'])) + if "key" in attribute: + result.add((attribute["key"], attribute["value"])) else: - result.add((None, attribute['value'])) + result.add((None, attribute["value"])) return result @@ -139,6 +138,7 @@ def attributes_to_tuples(attributes): def run_tests_with_client(client, tests, args=None, variables=None): def test_func(): from reportportal_client import set_current + set_current(client) return run_pytest_tests(tests, args, variables) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e062897..4a238f7 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -15,175 +15,257 @@ from tests.helpers import utils -HIERARCHY_TESTS = \ - [ - ['examples/test_simple.py'], - ['examples/hierarchy/inner/test_inner_simple.py'], - ['examples/hierarchy/test_in_class.py'], - ['examples/hierarchy/test_in_class_in_class.py'], - ['examples/hierarchy/another_inner/test_another_inner_simple.py', - 
'examples/hierarchy/inner/test_inner_simple.py'] - ] + \ - [['examples/params/test_in_class_parameterized.py']] * 5 + \ - [['examples/hierarchy/inner/test_inner_simple.py']] * 7 + \ - [['examples/hierarchy/test_in_class_in_class.py']] + \ - [['examples/test_simple.py']] * 2 +HIERARCHY_TESTS = ( + [ + ["examples/test_simple.py"], + ["examples/hierarchy/inner/test_inner_simple.py"], + ["examples/hierarchy/test_in_class.py"], + ["examples/hierarchy/test_in_class_in_class.py"], + [ + "examples/hierarchy/another_inner/test_another_inner_simple.py", + "examples/hierarchy/inner/test_inner_simple.py", + ], + ] + + [["examples/params/test_in_class_parameterized.py"]] * 5 + + [["examples/hierarchy/inner/test_inner_simple.py"]] * 7 + + [["examples/hierarchy/test_in_class_in_class.py"]] + + [["examples/test_simple.py"]] * 2 +) # noinspection PyTypeChecker -HIERARCHY_TEST_VARIABLES = \ - [dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES)] * 6 + \ - [ - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': 1}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': 2}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': 999}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': -1}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dir_path_separator': '/', 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dir_path_separator': '\\', 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': 1, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': 2, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': 999, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': -1, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict(**utils.DEFAULT_VARIABLES), - dict(**utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_test_file': False}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_test_file': False, 'rp_hierarchy_dirs_level': 1}, **utils.DEFAULT_VARIABLES) - ] +HIERARCHY_TEST_VARIABLES = [ + dict({"rp_hierarchy_dirs": True, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES) +] * 6 + [ + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": 1}, **utils.DEFAULT_VARIABLES + ), + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": 2}, **utils.DEFAULT_VARIABLES + ), + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": 999}, + **utils.DEFAULT_VARIABLES, + ), + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": -1}, + **utils.DEFAULT_VARIABLES, + ), + dict({"rp_hierarchy_dir_path_separator": "/", "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dir_path_separator": "\\", "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": 1, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": 2, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": 999, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": -1, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + 
dict(**utils.DEFAULT_VARIABLES), + dict(**utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_test_file": False}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_test_file": False, "rp_hierarchy_dirs_level": 1}, **utils.DEFAULT_VARIABLES), +] HIERARCHY_TEST_EXPECTED_ITEMS = [ [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_simple.py')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "test_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_simple.py")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'inner', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('inner')}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_inner_simple.py')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "inner", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + {"name": "test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("inner")}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_inner_simple.py")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'Tests', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('test_in_class.py')}, - {'name': 'test_in_class', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "test_in_class.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + {"name": "Tests", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("test_in_class.py")}, + {"name": "test_in_class", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("Tests")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class_in_class.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_in_class.py')}, - {'name': 'Test', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('Tests')}, - {'name': 'test_in_class_in_class', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('Test')} + {"name": "examples", 
"item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + { + "name": "test_in_class_in_class.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("hierarchy"), + }, + {"name": "Tests", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("test_in_class_in_class.py")}, + {"name": "Test", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("Tests")}, + {"name": "test_in_class_in_class", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("Test")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'another_inner', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'test_another_inner_simple.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('another_inner')}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('test_another_inner_simple.py')}, - {'name': 'inner', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('inner')}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_inner_simple.py')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "another_inner", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + { + "name": "test_another_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("another_inner"), + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("test_another_inner_simple.py"), + }, + {"name": "inner", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + {"name": "test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("inner")}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_inner_simple.py")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'params', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('params')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "params", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + { + "name": "test_in_class_parameterized.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("params"), + }, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'params', 'item_type': 'SUITE', 'parent_item_id': 
lambda x: x is None}, - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('params')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "params", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "test_in_class_parameterized.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("params"), + }, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "test_in_class_parameterized.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "test_in_class_parameterized.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'params', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('params')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "params", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + { + "name": "test_in_class_parameterized.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("params"), + }, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'examples/hierarchy/inner/test_inner_simple.py', 'item_type': 'SUITE', - 'parent_item_id': 
lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('examples/hierarchy/inner/test_inner_simple.py')} + { + "name": "examples/hierarchy/inner/test_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x is None, + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("examples/hierarchy/inner/test_inner_simple.py"), + }, ], [ - {'name': 'examples\\hierarchy\\inner\\test_inner_simple.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('examples\\hierarchy\\inner\\test_inner_simple.py')} + { + "name": "examples\\hierarchy\\inner\\test_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x is None, + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("examples\\hierarchy\\inner\\test_inner_simple.py"), + }, ], [ - {'name': 'hierarchy/inner/test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('hierarchy/inner/test_inner_simple.py')} + {"name": "hierarchy/inner/test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("hierarchy/inner/test_inner_simple.py"), + }, ], [ - {'name': 'inner/test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('inner/test_inner_simple.py')} + {"name": "inner/test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("inner/test_inner_simple.py"), + }, ], [ - {'name': 'test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_inner_simple.py')} + {"name": "test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_inner_simple.py")}, ], [ - {'name': 'examples/hierarchy/inner/test_inner_simple.py', - 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('examples/hierarchy/inner/test_inner_simple.py')} + { + "name": "examples/hierarchy/inner/test_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x is None, + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("examples/hierarchy/inner/test_inner_simple.py"), + }, ], [ - {'name': 'examples/hierarchy/inner/test_inner_simple.py::test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x is None} + { + "name": "examples/hierarchy/inner/test_inner_simple.py::test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x is None, + } ], [ - {'name': 'examples/hierarchy/test_in_class_in_class.py::Tests::Test::test_in_class_in_class', - 'item_type': 'STEP', 'parent_item_id': lambda x: x is None} + { + "name": "examples/hierarchy/test_in_class_in_class.py::Tests::Test::test_in_class_in_class", + "item_type": "STEP", + "parent_item_id": lambda x: x 
is None, + } ], - [ - {'name': 'examples/test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x is None} - ], - [ - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x is None} - ] + [{"name": "examples::test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], + [{"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], ] HIERARCHY_TEST_PARAMETERS = [ diff --git a/tests/integration/test_attributes.py b/tests/integration/test_attributes.py index 2c6c4a5..a09e58c 100644 --- a/tests/integration/test_attributes.py +++ b/tests/integration/test_attributes.py @@ -15,6 +15,8 @@ from unittest import mock +import pytest + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @@ -25,17 +27,17 @@ def test_custom_attribute_report(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = {'markers': 'scope: to which test scope a test relates'} + variables = {"markers": "scope: to which test scope a test relates"} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/attributes/test_one_attribute.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/attributes/test_one_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['attributes'] == [{'key': 'scope', 'value': 'smoke'}] + assert step_call_args["attributes"] == [{"key": "scope", "value": "smoke"}] @mock.patch(REPORT_PORTAL_SERVICE) @@ -44,20 +46,17 @@ def test_custom_attribute_not_reported_if_skip_configured(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = { - 'markers': 'scope: to which test scope a test relates', - 'rp_ignore_attributes': 'scope' - } + variables = {"markers": "scope: to which test scope a test relates", "rp_ignore_attributes": "scope"} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/attributes/test_one_attribute.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/attributes/test_one_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['attributes'] == [] + assert step_call_args["attributes"] == [] @mock.patch(REPORT_PORTAL_SERVICE) @@ -66,23 +65,21 @@ def test_two_attributes_different_values_report(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = {'markers': 'scope: to which test scope a test relates'} + variables = {"markers": "scope: to which test scope a test relates"} variables.update(utils.DEFAULT_VARIABLES.items()) result = utils.run_pytest_tests( - tests=['examples/attributes/test_two_attributes_with_same_key.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + tests=["examples/attributes/test_two_attributes_with_same_key.py"], 
variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - actual_attributes = step_call_args['attributes'] + actual_attributes = step_call_args["attributes"] - assert utils.attributes_to_tuples(actual_attributes) == { - ('scope', 'smoke'), - ('scope', 'regression') - } + assert utils.attributes_to_tuples(actual_attributes) == {("scope", "smoke"), ("scope", "regression")} @mock.patch(REPORT_PORTAL_SERVICE) @@ -91,19 +88,17 @@ def test_skip_attribute(mock_client_init): :param mock_client_init: Pytest fixture """ - result = utils.run_pytest_tests(tests=['examples/skip/test_simple_skip.py']) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/skip/test_simple_skip.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - actual_attributes = step_call_args['attributes'] + actual_attributes = step_call_args["attributes"] - assert utils.attributes_to_tuples(actual_attributes) == { - (None, 'skip') - } + assert utils.attributes_to_tuples(actual_attributes) == {(None, "skip")} @mock.patch(REPORT_PORTAL_SERVICE) @@ -112,10 +107,10 @@ def test_custom_runtime_attribute_report(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = {'markers': 'scope: to which test scope a test relates\nruntime: runtime attribute mark'} + variables = {"markers": "scope: to which test scope a test relates\nruntime: runtime attribute mark"} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/attributes/test_runtime_attribute.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/attributes/test_runtime_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' @@ -123,14 +118,53 @@ def test_custom_runtime_attribute_report(mock_client_init): start_call_args = mock_client.start_test_item.call_args_list start_step_call_args = start_call_args[-1][1] - assert start_step_call_args['attributes'] == [ - {'key': 'scope', 'value': 'smoke'} - ] + assert start_step_call_args["attributes"] == [{"key": "scope", "value": "smoke"}] finish_call_args = mock_client.finish_test_item.call_args_list finish_step_call_args = finish_call_args[-1][1] - actual_attributes = finish_step_call_args['attributes'] - attribute_tuple_list = [(kv.get('key'), kv['value']) - for kv in actual_attributes] + actual_attributes = finish_step_call_args["attributes"] + attribute_tuple_list = [(kv.get("key"), kv["value"]) for kv in actual_attributes] + + assert set(attribute_tuple_list) == {("scope", "smoke"), (None, "runtime")} + + +@pytest.mark.parametrize("rp_hierarchy_code", [True, False]) +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes(mock_client_init, rp_hierarchy_code): + """Verify configuration attributes are reported. 
+ + :param mock_client_init: Pytest fixture + """ + variables = {"rp_tests_attributes": "test_key:test_value", "rp_hierarchy_code": rp_hierarchy_code} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" - assert set(attribute_tuple_list) == {('scope', 'smoke'), (None, 'runtime')} + mock_client = mock_client_init.return_value + assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' + + call_args = mock_client.start_test_item.call_args_list + step_call_args = call_args[-1][1] + assert step_call_args["attributes"] == [{"key": "test_key", "value": "test_value"}] + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_add(mock_client_init): + """Verify configuration attributes are reported along with custom attribute. + + :param mock_client_init: Pytest fixture + """ + variables = {"markers": "scope: to which test scope a test relates", "rp_tests_attributes": "test_key:test_value"} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/attributes/test_one_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + mock_client = mock_client_init.return_value + assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' + + call_args = mock_client.start_test_item.call_args_list + step_call_args = call_args[-1][1] + attributes = step_call_args["attributes"] + assert len(attributes) == 2 + assert {"key": "scope", "value": "smoke"} in attributes + assert {"key": "test_key", "value": "test_value"} in attributes diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py new file mode 100644 index 0000000..beb9584 --- /dev/null +++ b/tests/integration/test_bdd.py @@ -0,0 +1,1132 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
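+
+# Integration tests for pytest-bdd reporting. Each test runs an example
+# feature file through pytest with a mocked ReportPortal client and asserts
+# on the reported item names, hierarchy, attributes, parameters and statuses.
+# The helpers below emulate server-side item IDs as "<name>_<count>" so that
+# parent-child relationships can be verified without a live server.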
+ +import importlib.metadata +from collections import defaultdict +from typing import Optional +from unittest import mock + +import pytest +from reportportal_client import set_current +from reportportal_client.steps import StepReporter + +from tests import REPORT_PORTAL_SERVICE +from tests.helpers import utils + +pytest_bdd_version = [int(p) for p in importlib.metadata.version("pytest-bdd").split(".")] + +ITEM_ID_DICT = defaultdict(lambda: 0) +ITEM_ID_LIST = [] + + +def generate_item_id(*args, **kwargs) -> str: + global ITEM_ID_DICT + global ITEM_ID_LIST + if args: + name = args[0] + else: + name = kwargs["name"] + count = ITEM_ID_DICT[name] + count += 1 + ITEM_ID_DICT[name] = count + item_id = f"{name}_{count}" + ITEM_ID_LIST.append(item_id) + return item_id + + +def get_last_item_id() -> Optional[str]: + global ITEM_ID_LIST + if len(ITEM_ID_LIST) > 0: + return ITEM_ID_LIST[-1] + + +def remove_last_item_id(*_, **__) -> Optional[str]: + global ITEM_ID_LIST + if len(ITEM_ID_LIST) > 0: + return ITEM_ID_LIST.pop() + + +def setup_mock(mock_client_init): + mock_client = mock_client_init.return_value + mock_client.step_reporter = StepReporter(mock_client) + set_current(mock_client) + return mock_client + + +def setup_mock_for_logging(mock_client_init): + mock_client = setup_mock(mock_client_init) + mock_client.start_test_item.side_effect = generate_item_id + mock_client.finish_test_item.side_effect = remove_last_item_id + mock_client.current_item.side_effect = get_last_item_id + return mock_client + + +STEP_NAMES = [ + "Given there are 5 cucumbers", + "When I eat 3 cucumbers", + "And I eat 2 cucumbers", + "Then I should have 0 cucumbers", +] + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_basic(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + assert mock_client.start_test_item.call_count == 5, 'There should be exactly five "start_test_item" calls' + assert ( + mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count + ), '"start_test_item" and "finish_test_item" should be called the same number of times' + + scenario_call = mock_client.start_test_item.call_args_list[0] + code_ref = "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" + assert scenario_call[1]["item_type"] == "STEP", "First call should be a Scenario" + assert scenario_call[1].get("has_stats", True) is True, "First call should have stats" + assert scenario_call[1]["code_ref"] == code_ref + assert scenario_call[1]["test_case_id"] == code_ref + assert scenario_call[1]["name"] == "Feature: Four step arguments - Scenario: Arguments for given, when, and, then" + assert scenario_call[1]["parent_item_id"] is None + assert scenario_call[1]["parameters"] is None + assert scenario_call[1]["description"] is None + + step_calls = mock_client.start_test_item.call_args_list[1:] + for i, call in enumerate(step_calls): + assert call[0][0] == STEP_NAMES[i] + assert call[0][2] == "step", "All other calls should be Steps" + assert call[1]["has_stats"] is False, "All other calls should not have stats" + + finish_calls = mock_client.finish_test_item.call_args_list + finish_step_calls = finish_calls[:-1] + for i, call in enumerate(finish_step_calls): + assert call[0][0] == f"{STEP_NAMES[i]}_1" + assert call[1]["status"] == "PASSED" + finish_scenario_call = finish_calls[-1] + assert finish_scenario_call[1]["status"] 
== "PASSED" + assert ( + finish_scenario_call[1]["item_id"] + == "Feature: Four step arguments - Scenario: Arguments for given, when, and, then_1" + ) + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_basic_with_feature_suite(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + assert mock_client.start_test_item.call_count == 6, 'There should be exactly six "start_test_item" calls' + assert ( + mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count + ), '"start_test_item" and "finish_test_item" should be called the same number of times' + + suite_call = mock_client.start_test_item.call_args_list[0] + assert suite_call[1]["item_type"] == "SUITE", "First call should be a Feature" + assert suite_call[1].get("has_stats", True) is True, "First call should have stats" + assert suite_call[1]["parent_item_id"] is None + assert suite_call[1]["name"] == "Feature: Four step arguments" + + scenario_call = mock_client.start_test_item.call_args_list[1] + code_ref = "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" + assert scenario_call[1]["item_type"] == "STEP", "First call should be a Scenario" + assert scenario_call[1].get("has_stats", True) is True, "First call should have stats" + assert scenario_call[1]["code_ref"] == code_ref + assert scenario_call[1]["test_case_id"] == code_ref + assert scenario_call[1]["name"] == "Scenario: Arguments for given, when, and, then" + assert scenario_call[1]["parent_item_id"] == "Feature: Four step arguments_1" + assert scenario_call[1]["parameters"] is None + assert scenario_call[1]["description"] is None + + step_calls = mock_client.start_test_item.call_args_list[2:] + for call in step_calls: + assert call[0][2] == "step", "All other calls should be Steps" + assert call[1]["has_stats"] is False, "All other calls should not have stats" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_descriptions(mock_client_init): + mock_client = setup_mock(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments_description.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + code_ref = "features/arguments_four_steps_description.feature/[SCENARIO:Arguments for given, when, and, then]" + scenario_call = mock_client.start_test_item.call_args_list[0] + assert scenario_call[1]["code_ref"] == code_ref + assert scenario_call[1]["test_case_id"] == code_ref + description = scenario_call[1]["description"] + assert description == "Description for the scenario" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_feature_descriptions(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_arguments_description.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + feature_call = mock_client.start_test_item.call_args_list[0] + assert feature_call[1]["description"] == "Description for the feature" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_failed_feature(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = 
utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_failed_step.py"]) + assert int(result) == 1, "Exit code should be 1 (test error)" + + assert mock_client.start_test_item.call_count == 2, 'There should be exactly two "start_test_item" calls' + assert ( + mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count + ), '"start_test_item" and "finish_test_item" should be called the same number of times' + + finish_calls = mock_client.finish_test_item.call_args_list + finish_step_call = finish_calls[0] + finish_scenario_call = finish_calls[1] + + assert finish_step_call[0][0] == "Given I have a failed step_1" + assert finish_step_call[1]["status"] == "FAILED" + assert finish_scenario_call[1]["item_id"] == "Feature: Test failed scenario - Scenario: The scenario_1" + assert finish_scenario_call[1]["status"] == "FAILED" + + log_count = mock_client.log.call_count + # 1 - debug log from pytest-bdd's scenario module; 2 - traceback log from the agent; 3 - error log from pytest + assert log_count == 1 + 1 + 1, 'Incorrect number of "log" calls' + + log_call_args_list = mock_client.log.call_args_list[1:] + assert log_call_args_list[0][1]["level"] == "ERROR" + assert log_call_args_list[0][1]["message"].endswith("AssertionError: assert False\n") + assert log_call_args_list[0][1]["item_id"] == "Given I have a failed step_1" + + assert log_call_args_list[1][1]["level"] == "ERROR" + assert log_call_args_list[1][1]["message"].endswith("AssertionError") + assert log_call_args_list[1][1]["item_id"] == "Feature: Test failed scenario - Scenario: The scenario_1" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_attributes(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + test_file = "examples/bdd/step_defs/test_belly.py" + result = utils.run_pytest_tests(tests=[test_file]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[0] + scenario_attrs = scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 2 + assert {"value": "ok"} in scenario_attrs + assert {"key": "key", "value": "value"} in scenario_attrs + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_feature_attributes(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + test_file = "examples/bdd/step_defs/test_belly.py" + result = utils.run_pytest_tests(tests=[test_file], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + feature_call = mock_client.start_test_item.call_args_list[0] + feature_attrs = feature_call[1].get("attributes", []) + assert feature_attrs is not None + assert len(feature_attrs) == 3 + assert {"value": "smoke"} in feature_attrs + assert {"value": "test"} in feature_attrs + assert {"key": "feature", "value": "belly"} in feature_attrs + + scenario_call = mock_client.start_test_item.call_args_list[1] + scenario_attrs = scenario_call[1].get("attributes", []) + assert scenario_attrs is not None + assert len(scenario_attrs) == 2 + assert {"value": "ok"} in scenario_attrs + assert {"key": "key", "value": "value"} in scenario_attrs + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_background_step(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + test_file = "examples/bdd/step_defs/test_background.py" + result = utils.run_pytest_tests(tests=[test_file]) + assert 
int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify the first scenario + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert scenario_call_1[1]["name"] == "Feature: Test scenario with a background - Scenario: The first scenario" + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + + # Verify the Background step for the first scenario + background_call_1 = mock_client.start_test_item.call_args_list[1] + assert background_call_1[0][0] == "Background" + assert background_call_1[0][2] == "step" + assert background_call_1[1]["has_stats"] is False + assert background_call_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + + # Verify the nested steps within the Background for the first scenario + nested_step_call_1 = mock_client.start_test_item.call_args_list[2] + assert nested_step_call_1[0][0] == "Given I have empty step" + assert nested_step_call_1[0][2] == "step" + assert nested_step_call_1[1]["parent_item_id"] == background_call_1[0][0] + "_1" + assert nested_step_call_1[1]["has_stats"] is False + + # Verify the step within the first scenario + scenario_step_call_1 = mock_client.start_test_item.call_args_list[3] + assert scenario_step_call_1[0][0] == "Then I have another empty step" + assert scenario_step_call_1[0][2] == "step" + assert scenario_step_call_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert scenario_step_call_1[1]["has_stats"] is False + + # Verify the second scenario + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + assert scenario_call_2[1]["name"] == "Feature: Test scenario with a background - Scenario: The second scenario" + assert scenario_call_2[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + + # Verify the Background step for the second scenario + background_call_2 = mock_client.start_test_item.call_args_list[5] + assert background_call_2[0][0] == "Background" + assert background_call_2[0][2] == "step" + assert background_call_2[1]["has_stats"] is False + assert background_call_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_1" + + # Verify the nested steps within the Background for the second scenario + nested_step_call_2 = mock_client.start_test_item.call_args_list[6] + assert nested_step_call_2[0][0] == "Given I have empty step" + assert nested_step_call_2[0][2] == "step" + assert nested_step_call_2[1]["parent_item_id"] == background_call_2[0][0] + "_2" + assert nested_step_call_2[1]["has_stats"] is False + + # Verify the step within the second scenario + scenario_step_call_2 = mock_client.start_test_item.call_args_list[7] + assert scenario_step_call_2[0][0] == "Then I have one more empty step" + assert scenario_step_call_2[0][2] == "step" + assert scenario_step_call_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_1" + assert scenario_step_call_2[1]["has_stats"] is False + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_background_two_steps(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + test_file = "examples/bdd/step_defs/test_background_two_steps.py" + result = utils.run_pytest_tests(tests=[test_file]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify the scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] == "Feature: Test scenario with a background with two steps - Scenario: The scenario" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert 
scenario_call[1].get("has_stats", True) + + # Verify the Background step + background_call = mock_client.start_test_item.call_args_list[1] + assert background_call[0][0] == "Background" + assert background_call[0][2] == "step" + assert background_call[1]["has_stats"] is False + assert background_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + + # Verify the first nested step within the Background + nested_step_call_1 = mock_client.start_test_item.call_args_list[2] + assert nested_step_call_1[0][0] == "Given I have first empty step" + assert nested_step_call_1[0][2] == "step" + assert nested_step_call_1[1]["parent_item_id"] == background_call[0][0] + "_3" + assert nested_step_call_1[1]["has_stats"] is False + + # Verify the second nested step within the Background + nested_step_call_2 = mock_client.start_test_item.call_args_list[3] + assert nested_step_call_2[0][0] == "And I have second empty step" + assert nested_step_call_2[0][2] == "step" + assert nested_step_call_2[1]["parent_item_id"] == background_call[0][0] + "_3" + assert nested_step_call_2[1]["has_stats"] is False + + # Verify the scenario step + scenario_step_call = mock_client.start_test_item.call_args_list[4] + assert scenario_step_call[0][0] == "Then I have main step" + assert scenario_step_call[0][2] == "step" + assert scenario_step_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert scenario_step_call[1]["has_stats"] is False + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rule(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario from first rule + scenario_1_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_1_call[1]["name"] + == "Feature: Test rule keyword - Rule: The first rule - Scenario: The first scenario" + ) + assert scenario_1_call[1]["item_type"] == "STEP" + assert scenario_1_call[1].get("has_stats", True) is True + assert scenario_1_call[1]["parent_item_id"] is None + assert ( + scenario_1_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The first scenario]" + ) + + # Verify first scenario steps + step_1_given = mock_client.start_test_item.call_args_list[1] + assert step_1_given[0][0] == "Given I have empty step" + assert step_1_given[0][2] == "step" + assert step_1_given[1]["parent_item_id"] == scenario_1_call[1]["name"] + "_1" + assert step_1_given[1]["has_stats"] is False + + step_1_then = mock_client.start_test_item.call_args_list[2] + assert step_1_then[0][0] == "Then I have another empty step" + assert step_1_then[0][2] == "step" + assert step_1_then[1]["parent_item_id"] == scenario_1_call[1]["name"] + "_1" + assert step_1_then[1]["has_stats"] is False + + # Verify second scenario from first rule + scenario_2_call = mock_client.start_test_item.call_args_list[3] + assert ( + scenario_2_call[1]["name"] + == "Feature: Test rule keyword - Rule: The first rule - Scenario: The second scenario" + ) + assert scenario_2_call[1]["item_type"] == "STEP" + assert scenario_2_call[1].get("has_stats", True) is True + assert scenario_2_call[1]["parent_item_id"] is None + assert ( + scenario_2_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The second scenario]" + ) + + # Verify second scenario steps + step_2_given 
= mock_client.start_test_item.call_args_list[4] + assert step_2_given[0][0] == "Given I have empty step" + assert step_2_given[0][2] == "step" + assert step_2_given[1]["parent_item_id"] == scenario_2_call[1]["name"] + "_1" + assert step_2_given[1]["has_stats"] is False + + step_2_then = mock_client.start_test_item.call_args_list[5] + assert step_2_then[0][0] == "Then I have one more empty step" + assert step_2_then[0][2] == "step" + assert step_2_then[1]["parent_item_id"] == scenario_2_call[1]["name"] + "_1" + assert step_2_then[1]["has_stats"] is False + + # Verify third scenario from second rule + scenario_3_call = mock_client.start_test_item.call_args_list[6] + assert ( + scenario_3_call[1]["name"] + == "Feature: Test rule keyword - Rule: The second rule - Scenario: The third scenario" + ) + assert scenario_3_call[1]["item_type"] == "STEP" + assert scenario_3_call[1].get("has_stats", True) is True + assert scenario_3_call[1]["parent_item_id"] is None + assert ( + scenario_3_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The second rule]/[SCENARIO:The third scenario]" + ) + + # Verify third scenario steps + step_3_given = mock_client.start_test_item.call_args_list[7] + assert step_3_given[0][0] == "Given I have empty step" + assert step_3_given[0][2] == "step" + assert step_3_given[1]["parent_item_id"] == scenario_3_call[1]["name"] + "_1" + assert step_3_given[1]["has_stats"] is False + + step_3_then = mock_client.start_test_item.call_args_list[8] + assert step_3_then[0][0] == "Then I have one more else empty step" + assert step_3_then[0][2] == "step" + assert step_3_then[1]["parent_item_id"] == scenario_3_call[1]["name"] + "_1" + assert step_3_then[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rule_hierarchy(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify Feature + feature_call = mock_client.start_test_item.call_args_list[0] + assert feature_call[1]["name"] == "Feature: Test rule keyword" + assert feature_call[1]["item_type"] == "SUITE" + assert feature_call[1].get("has_stats", True) is True + assert feature_call[1]["parent_item_id"] is None + feature_id = "Feature: Test rule keyword_1" + + # Verify first Rule + rule_1_call = mock_client.start_test_item.call_args_list[1] + assert rule_1_call[1]["name"] == "Rule: The first rule" + assert rule_1_call[1]["item_type"] == "SUITE" + assert rule_1_call[1].get("has_stats", True) is True + assert rule_1_call[1]["parent_item_id"] == feature_id + rule_1_id = "Rule: The first rule_1" + + # Verify first scenario under first rule + scenario_1_call = mock_client.start_test_item.call_args_list[2] + assert scenario_1_call[1]["name"] == "Scenario: The first scenario" + assert scenario_1_call[1]["item_type"] == "STEP" + assert scenario_1_call[1].get("has_stats", True) is True + assert scenario_1_call[1]["parent_item_id"] == rule_1_id + assert ( + scenario_1_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The first scenario]" + ) + + # 
Verify second scenario under first rule + scenario_2_call = mock_client.start_test_item.call_args_list[5] + assert scenario_2_call[1]["name"] == "Scenario: The second scenario" + assert scenario_2_call[1]["item_type"] == "STEP" + assert scenario_2_call[1].get("has_stats", True) is True + assert scenario_2_call[1]["parent_item_id"] == rule_1_id + assert ( + scenario_2_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The second scenario]" + ) + + # Verify second Rule + rule_2_call = mock_client.start_test_item.call_args_list[8] + assert rule_2_call[1]["name"] == "Rule: The second rule" + assert rule_2_call[1]["item_type"] == "SUITE" + assert rule_2_call[1].get("has_stats", True) is True + assert rule_2_call[1]["parent_item_id"] == feature_id + rule_2_id = "Rule: The second rule_1" + + # Verify third scenario under second rule + scenario_3_call = mock_client.start_test_item.call_args_list[9] + assert scenario_3_call[1]["name"] == "Scenario: The third scenario" + assert scenario_3_call[1]["item_type"] == "STEP" + assert scenario_3_call[1].get("has_stats", True) is True + assert scenario_3_call[1]["parent_item_id"] == rule_2_id + assert ( + scenario_3_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The second rule]/[SCENARIO:The third scenario]" + ) + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_parameters(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_parameters_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with parameters + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + assert ( + scenario_call_1[1]["code_ref"] + == "features/scenario_outline_parameters.feature/[EXAMPLE:Test with different parameters" + '[parameters:123;str:"first"]]' + ) + parameters = scenario_call_1[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"first"') in parameters + assert ("parameters", "123") in parameters + assert scenario_call_1[1]["description"] is not None + assert scenario_call_1[1]["description"].endswith('|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|') + + # Verify steps for first scenario + given_step_1 = mock_client.start_test_item.call_args_list[1] + assert given_step_1[0][0] == "Given It is test with parameters" + assert given_step_1[0][2] == "step" + assert given_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert given_step_1[1]["has_stats"] is False + + when_step_1 = mock_client.start_test_item.call_args_list[2] + assert when_step_1[0][0] == 'When I have parameter "first"' + assert when_step_1[0][2] == "step" + assert when_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert when_step_1[1]["has_stats"] is False + + then_step_1 = mock_client.start_test_item.call_args_list[3] + assert then_step_1[0][0] == "Then I emit number 123 on level info" + assert then_step_1[0][2] == "step" + assert then_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert then_step_1[1]["has_stats"] is False + + # Verify second scenario with parameters + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + assert ( + 
scenario_call_2[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call_2[1]["item_type"] == "STEP" + assert scenario_call_2[1].get("has_stats", True) + assert ( + scenario_call_2[1]["code_ref"] + == "features/scenario_outline_parameters.feature/[EXAMPLE:Test with different parameters" + '[parameters:12345;str:"second"]]' + ) + parameters = scenario_call_2[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"second"') in parameters + assert ("parameters", "12345") in parameters + assert scenario_call_2[1]["description"] is not None + assert scenario_call_2[1]["description"].endswith('|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|') + + # Verify steps for second scenario + given_step_2 = mock_client.start_test_item.call_args_list[5] + assert given_step_2[0][0] == "Given It is test with parameters" + assert given_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert given_step_2[1]["has_stats"] is False + + when_step_2 = mock_client.start_test_item.call_args_list[6] + assert when_step_2[0][0] == 'When I have parameter "second"' + assert when_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert when_step_2[1]["has_stats"] is False + + then_step_2 = mock_client.start_test_item.call_args_list[7] + assert then_step_2[0][0] == "Then I emit number 12345 on level info" + assert then_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert then_step_2[1]["has_stats"] is False + + # Verify third scenario with parameters + scenario_call_3 = mock_client.start_test_item.call_args_list[8] + assert ( + scenario_call_3[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call_3[1]["item_type"] == "STEP" + assert scenario_call_3[1].get("has_stats", True) + assert ( + scenario_call_3[1]["code_ref"] + == "features/scenario_outline_parameters.feature/[EXAMPLE:Test with different parameters" + '[parameters:12345678;str:"third"]]' + ) + parameters = scenario_call_3[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"third"') in parameters + assert ("parameters", "12345678") in parameters + assert scenario_call_3[1]["description"] is not None + assert scenario_call_3[1]["description"].endswith('|\xa0"third"\xa0|\xa0\xa012345678\xa0\xa0|') + + # Verify steps for third scenario + given_step_3 = mock_client.start_test_item.call_args_list[9] + assert given_step_3[0][0] == "Given It is test with parameters" + assert given_step_3[1]["parent_item_id"] == scenario_call_3[1]["name"] + "_3" + assert given_step_3[1]["has_stats"] is False + + when_step_3 = mock_client.start_test_item.call_args_list[10] + assert when_step_3[0][0] == 'When I have parameter "third"' + assert when_step_3[1]["parent_item_id"] == scenario_call_3[1]["name"] + "_3" + assert when_step_3[1]["has_stats"] is False + + then_step_3 = mock_client.start_test_item.call_args_list[11] + assert then_step_3[0][0] == "Then I emit number 12345678 on level info" + assert then_step_3[1]["parent_item_id"] == scenario_call_3[1]["name"] + "_3" + assert then_step_3[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_examples_tags(mock_client_init): + mock_client 
= setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/example_tags_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + scenario_attrs = scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 1 + assert {"value": "test"} in scenario_attrs + + # Verify second scenario + scenario_call = mock_client.start_test_item.call_args_list[4] + scenario_attrs = scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 1 + assert {"value": "test"} in scenario_attrs + + # Verify third scenario + scenario_call = mock_client.start_test_item.call_args_list[8] + scenario_attrs = scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 1 + assert {"value": "test"} in scenario_attrs + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_background_steps(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_background_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with background + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == "Feature: Basic test with parameters and background - Scenario Outline: Test with different parameters" + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + assert ( + scenario_call_1[1]["code_ref"] + == "features/scenario_outline_background.feature/[EXAMPLE:Test with different parameters" + '[parameters:123;str:"first"]]' + ) + parameters = scenario_call_1[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"first"') in parameters + assert ("parameters", "123") in parameters + + # Verify the Background step + background_call = mock_client.start_test_item.call_args_list[1] + assert background_call[0][0] == "Background" + assert background_call[0][2] == "step" + assert background_call[1]["has_stats"] is False + assert background_call[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + + # Verify background step for first scenario + background_step_1 = mock_client.start_test_item.call_args_list[2] + assert background_step_1[0][0] == "Given I have empty step in background" + assert background_step_1[1]["parent_item_id"] == background_call[0][0] + "_4" + assert background_step_1[1]["has_stats"] is False + + # Verify regular steps for first scenario + given_step_1 = mock_client.start_test_item.call_args_list[3] + assert given_step_1[0][0] == "Given It is test with parameters" + assert given_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert given_step_1[1]["has_stats"] is False + + when_step_1 = mock_client.start_test_item.call_args_list[4] + assert when_step_1[0][0] == 'When I have parameter "first"' + assert when_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert when_step_1[1]["has_stats"] is False + + then_step_1 = mock_client.start_test_item.call_args_list[5] + assert then_step_1[0][0] == "Then I emit number 123 on level info" + assert 
then_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert then_step_1[1]["has_stats"] is False + + # Verify second scenario with background + scenario_call_2 = mock_client.start_test_item.call_args_list[6] + assert ( + scenario_call_2[1]["name"] + == "Feature: Basic test with parameters and background - Scenario Outline: Test with different parameters" + ) + assert scenario_call_2[1]["item_type"] == "STEP" + assert scenario_call_2[1].get("has_stats", True) + assert ( + scenario_call_2[1]["code_ref"] + == "features/scenario_outline_background.feature/[EXAMPLE:Test with different parameters" + '[parameters:12345;str:"second"]]' + ) + parameters = scenario_call_2[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"second"') in parameters + assert ("parameters", "12345") in parameters + + # Verify the Background step + background_call = mock_client.start_test_item.call_args_list[7] + assert background_call[0][0] == "Background" + assert background_call[0][2] == "step" + assert background_call[1]["has_stats"] is False + assert background_call[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + + # Verify background step for second scenario + background_step_2 = mock_client.start_test_item.call_args_list[8] + assert background_step_2[0][0] == "Given I have empty step in background" + assert background_step_2[1]["parent_item_id"] == background_call[0][0] + "_5" + assert background_step_2[1]["has_stats"] is False + + # Verify steps for second scenario + given_step_2 = mock_client.start_test_item.call_args_list[9] + assert given_step_2[0][0] == "Given It is test with parameters" + assert given_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert given_step_2[1]["has_stats"] is False + + when_step_2 = mock_client.start_test_item.call_args_list[10] + assert when_step_2[0][0] == 'When I have parameter "second"' + assert when_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert when_step_2[1]["has_stats"] is False + + then_step_2 = mock_client.start_test_item.call_args_list[11] + assert then_step_2[0][0] == "Then I emit number 12345 on level info" + assert then_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert then_step_2[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_description(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_description_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with description + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + description_1 = scenario_call_1[1]["description"] + assert description_1.startswith("The description for the scenario outline") + assert "Parameters:\n\n" in description_1 + assert "|\xa0\xa0\xa0str\xa0\xa0\xa0|\xa0parameters\xa0|" in description_1 + assert '|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|' in description_1 + + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + description_2 = scenario_call_2[1]["description"] + assert description_2.startswith("The description for the scenario outline") + assert "Parameters:\n\n" in description_2 + assert "|\xa0\xa0\xa0str\xa0\xa0\xa0\xa0|\xa0parameters\xa0|" in description_2 + assert 
'|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|' in description_2 + + # Verify the steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rule_description(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_rule_description_steps.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify Feature call + feature_call = mock_client.start_test_item.call_args_list[0] + assert feature_call[1]["name"] == "Feature: Test rule keyword" + assert feature_call[1]["code_ref"].endswith("rule_description.feature") + + # Verify Rule call + rule_call = mock_client.start_test_item.call_args_list[1] + assert rule_call[1]["name"] == "Rule: The first rule" + assert rule_call[1]["description"] == "Description for the Rule" + assert rule_call[1]["item_type"] == "SUITE" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_dynamic_name(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_name_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == 'Feature: Dynamic scenario outline names - Scenario Outline: Test with the parameter "first"' + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert ( + scenario_call_1[1]["code_ref"] == "features/dynamic_scenario_outline_names.feature/" + '[EXAMPLE:Test with the parameter "first"[parameters:123;str:"first"]]' + ) + assert scenario_call_1[1]["parameters"] == {"str": '"first"', "parameters": "123"} + assert scenario_call_1[1]["description"] == ( + "Parameters:\n\n" + "\xa0\xa0\xa0\xa0|\xa0\xa0\xa0str\xa0\xa0\xa0|\xa0parameters\xa0|\n" + "\xa0\xa0\xa0\xa0|---------|------------|\n" + '\xa0\xa0\xa0\xa0|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|' + ) + + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + assert ( + scenario_call_2[1]["name"] + == 'Feature: Dynamic scenario outline names - Scenario Outline: Test with the parameter "second"' + ) + assert scenario_call_2[1]["item_type"] == "STEP" + assert ( + scenario_call_2[1]["code_ref"] == "features/dynamic_scenario_outline_names.feature/" + '[EXAMPLE:Test with the parameter "second"[parameters:12345;str:"second"]]' + ) + assert scenario_call_2[1]["parameters"] == {"str": '"second"', "parameters": "12345"} + assert scenario_call_2[1]["description"] == ( + "Parameters:\n\n" + "\xa0\xa0\xa0\xa0|\xa0\xa0\xa0str\xa0\xa0\xa0\xa0|\xa0parameters\xa0|\n" + "\xa0\xa0\xa0\xa0|----------|------------|\n" + '\xa0\xa0\xa0\xa0|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|' + ) + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_fail(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_fail_steps.py"]) + assert int(result) == 1, "Exit code should be 1 (test error)" + + # Verify first scenario with 
parameters
+    scenario_call_1 = mock_client.start_test_item.call_args_list[0]
+    assert (
+        scenario_call_1[1]["name"]
+        == "Feature: Basic test with parameters which fails - Scenario Outline: Test with different parameters failing"
+    )
+    assert scenario_call_1[1]["item_type"] == "STEP"
+    assert scenario_call_1[1].get("has_stats", True)
+    assert (
+        scenario_call_1[1]["code_ref"]
+        == "features/scenario_outline_fail.feature/[EXAMPLE:Test with different parameters failing"
+        '[parameters:123;str:"first"]]'
+    )
+
+    # Check failure logging for first scenario
+    finish_step_call_1 = mock_client.finish_test_item.call_args_list[3]
+    assert finish_step_call_1[1]["status"] == "FAILED"
+    assert finish_step_call_1[0][0].startswith("Then I fail")
+
+    finish_scenario_call_1 = mock_client.finish_test_item.call_args_list[4]
+    assert finish_scenario_call_1[1]["status"] == "FAILED"
+    assert finish_scenario_call_1[1]["item_id"] == scenario_call_1[1]["name"] + "_1"
+
+    log_calls = [
+        log_call
+        for log_call in mock_client.log.call_args_list
+        if "level" in log_call[1] and log_call[1]["level"] == "ERROR"
+    ]
+    assert len(log_calls) >= 2, "Should have at least 2 error log calls"
+
+    error_log = log_calls[0][1]
+    assert "AssertionError: This step always fails" in error_log["message"]
+    assert error_log["item_id"].startswith("Then I fail")
+
+    final_error_log = log_calls[1][1]
+    assert final_error_log["level"] == "ERROR"
+    assert final_error_log["message"].endswith("AssertionError")
+    assert final_error_log["item_id"] == scenario_call_1[1]["name"] + "_1"
+
+    # Verify second scenario with parameters
+    scenario_call_2 = mock_client.start_test_item.call_args_list[5]
+    assert (
+        scenario_call_2[1]["name"]
+        == "Feature: Basic test with parameters which fails - Scenario Outline: Test with different parameters failing"
+    )
+    assert scenario_call_2[1]["item_type"] == "STEP"
+    assert scenario_call_2[1].get("has_stats", True)
+    assert (
+        scenario_call_2[1]["code_ref"]
+        == "features/scenario_outline_fail.feature/[EXAMPLE:Test with different parameters failing"
+        '[parameters:12345;str:"second"]]'
+    )
+
+    # Check failure logging for second scenario
+    finish_step_call_2 = mock_client.finish_test_item.call_args_list[5 + 3]
+    assert finish_step_call_2[1]["status"] == "FAILED"
+    assert finish_step_call_2[0][0].startswith("Then I fail")
+
+    finish_scenario_call_2 = mock_client.finish_test_item.call_args_list[5 + 4]
+    assert finish_scenario_call_2[1]["status"] == "FAILED"
+    assert finish_scenario_call_2[1]["item_id"] == scenario_call_2[1]["name"] + "_2"
+
+    error_log = log_calls[2][1]
+    assert "AssertionError: This step always fails" in error_log["message"]
+    assert error_log["item_id"].startswith("Then I fail")
+
+    final_error_log = log_calls[3][1]
+    assert final_error_log["level"] == "ERROR"
+    assert final_error_log["message"].endswith("AssertionError")
+    assert final_error_log["item_id"] == scenario_call_2[1]["name"] + "_2"
+
+
+@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+")
+@mock.patch(REPORT_PORTAL_SERVICE)
+def test_doc_string_parameters(mock_client_init):
+    mock_client = setup_mock_for_logging(mock_client_init)
+    result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/doc_string_parameters_steps.py"])
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
+
+    # Verify scenario
+    scenario_call = mock_client.start_test_item.call_args_list[0]
+    assert (
+        scenario_call[1]["name"]
+        == "Feature: Basic test with a docstring parameter - Scenario: Test with a docstring parameter"
+ ) + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1].get("has_stats", True) + assert ( + scenario_call[1]["code_ref"] + == "features/doc_string_parameters.feature/[SCENARIO:Test with a docstring parameter]" + ) + + # Verify step + given_step = mock_client.start_test_item.call_args_list[1] + assert given_step[0][0] == "Given I have a docstring parameter:" + assert given_step[0][2] == "step" + assert given_step[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert given_step[1]["has_stats"] is False + + # Verify steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_data_table_parameter_steps(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/data_table_parameter_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] + == "Feature: A basic test with a Data Table parameter - Scenario: Test with Data Table" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1].get("has_stats", True) + assert scenario_call[1]["code_ref"] == "features/data_table_parameter.feature/[SCENARIO:Test with Data Table]" + + # Verify step + given_step = mock_client.start_test_item.call_args_list[1] + assert given_step[0][0] == "Given a step with a data table:" + assert given_step[0][2] == "step" + assert given_step[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert given_step[1]["has_stats"] is False + + # Verify steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_test_case_id(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_test_case_id_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with parameters includes the test case ID + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert ( + scenario_call[1]["code_ref"] + == "features/scenario_outline_test_case_id.feature/[EXAMPLE:Test with different parameters" + '[parameters:123;str:"first"]]' + ) + assert scenario_call[1]["parameters"] == {"str": '"first"', "parameters": "123"} + + # Verify the test case ID is correctly reported using the tag instead of code_ref + assert scenario_call[1]["test_case_id"] == 'outline_tc_id[parameters:123;str:"first"]' + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_custom_test_case_id(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/custom_test_case_id_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify scenario includes the test case ID 
+ scenario_call = mock_client.start_test_item.call_args_list[0] + assert scenario_call[1]["name"] == "Feature: Test dummy scenario - Scenario: The scenario" + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1]["code_ref"] == "features/custom_test_case_id.feature/[SCENARIO:The scenario]" + + # Verify the test case ID is correctly reported using the tag instead of code_ref + assert scenario_call[1]["test_case_id"] == "my_tc_id" + + # Verify step info + step_call = mock_client.start_test_item.call_args_list[1] + assert step_call[0][0] == "Given I have empty step" + assert step_call[0][2] == "step" + assert step_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert step_call[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_rule(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {"rp_tests_attributes": "test_key:test_value"} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_rule_description_steps.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[0] + assert scenario_call[1]["attributes"] == [{"key": "test_key", "value": "test_value"}] + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@pytest.mark.parametrize("rp_hierarchy_code, scenario_idx", [(True, 2), (False, 0)]) +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_rule_hierarchy(mock_client_init, rp_hierarchy_code, scenario_idx): + mock_client = setup_mock(mock_client_init) + variables = {"rp_tests_attributes": "test_key:test_value", "rp_hierarchy_code": rp_hierarchy_code} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_rule_description_steps.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[scenario_idx] + assert scenario_call[1]["attributes"] == [{"key": "test_key", "value": "test_value"}] + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_bdd_tags(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {"rp_tests_attributes": "test_key:test_value"} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_belly.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[0] + attributes = scenario_call[1]["attributes"] + assert len(attributes) == 3 + assert {"key": "test_key", "value": "test_value"} in attributes + assert {"value": "ok"} in attributes + assert {"key": "key", "value": "value"} in attributes diff --git a/tests/integration/test_case_id_report.py b/tests/integration/test_case_id_report.py index 345294e..b1ddffd 100644 --- a/tests/integration/test_case_id_report.py +++ b/tests/integration/test_case_id_report.py @@ -13,33 +13,50 @@ """This module includes integration tests for Test Case ID report.""" -import pytest from unittest import mock -from examples.test_case_id import test_case_id_decorator, \ - test_case_id_decorator_params_false, 
test_case_id_decorator_params_no, \ - test_case_id_decorator_params_partially, test_case_id_decorator_params_true +import pytest + +from examples.test_case_id import ( + test_case_id_decorator, + test_case_id_decorator_params_false, + test_case_id_decorator_params_no, + test_case_id_decorator_params_partially, + test_case_id_decorator_params_true, +) from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize(['test', 'expected_id'], [ - ('examples/test_simple.py', 'examples/test_simple.py:test_simple'), - ('examples/params/test_in_class_parameterized.py', - 'examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized[param]'), - ('examples/test_case_id/test_case_id_decorator.py', test_case_id_decorator.TEST_CASE_ID), - ('examples/test_case_id/test_case_id_decorator_params_false.py', test_case_id_decorator_params_false.TEST_CASE_ID), - ('examples/test_case_id/test_case_id_decorator_params_no.py', test_case_id_decorator_params_no.TEST_CASE_ID), - ('examples/test_case_id/test_case_id_decorator_params_partially.py', - test_case_id_decorator_params_partially.TEST_CASE_ID + '[value1]'), - ('examples/test_case_id/test_case_id_decorator_params_true.py', - test_case_id_decorator_params_true.TEST_CASE_ID + '[value1,value2]'), - ('examples/test_case_id/test_case_id_decorator_no_id.py', ''), - ('examples/test_case_id/test_case_id_decorator_no_id_params_false.py', ''), - ('examples/test_case_id/test_case_id_decorator_no_id_params_true.py', '[value1,value2]'), - ('examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py', '[value2]') -]) +@pytest.mark.parametrize( + ["test", "expected_id"], + [ + ("examples/test_simple.py", "examples/test_simple.py:test_simple"), + ( + "examples/params/test_in_class_parameterized.py", + "examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized[param]", + ), + ("examples/test_case_id/test_case_id_decorator.py", test_case_id_decorator.TEST_CASE_ID), + ( + "examples/test_case_id/test_case_id_decorator_params_false.py", + test_case_id_decorator_params_false.TEST_CASE_ID, + ), + ("examples/test_case_id/test_case_id_decorator_params_no.py", test_case_id_decorator_params_no.TEST_CASE_ID), + ( + "examples/test_case_id/test_case_id_decorator_params_partially.py", + test_case_id_decorator_params_partially.TEST_CASE_ID + "[value1]", + ), + ( + "examples/test_case_id/test_case_id_decorator_params_true.py", + test_case_id_decorator_params_true.TEST_CASE_ID + "[value1,value2]", + ), + ("examples/test_case_id/test_case_id_decorator_no_id.py", ""), + ("examples/test_case_id/test_case_id_decorator_no_id_params_false.py", ""), + ("examples/test_case_id/test_case_id_decorator_no_id_params_true.py", "[value1,value2]"), + ("examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py", "[value2]"), + ], +) def test_parameters(mock_client_init, test, expected_id): """Verify different tests have correct Test Case IDs. 
@@ -48,11 +65,11 @@ def test_parameters(mock_client_init, test, expected_id): :param expected_id: an expected Test Case ID """ result = utils.run_pytest_tests(tests=[test]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['test_case_id'] == expected_id + assert step_call_args["test_case_id"] == expected_id diff --git a/tests/integration/test_code_reference.py b/tests/integration/test_code_reference.py index 4a3af89..b197e25 100644 --- a/tests/integration/test_code_reference.py +++ b/tests/integration/test_code_reference.py @@ -13,22 +13,30 @@ """This module includes integration tests for code references generation.""" -import pytest from unittest import mock +import pytest + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize(['test', 'code_ref'], [ - ('examples/test_simple.py', 'examples/test_simple.py:test_simple'), - ('examples/params/test_in_class_parameterized.py', - 'examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized'), - ('examples/hierarchy/test_in_class.py', 'examples/hierarchy/test_in_class.py:Tests.test_in_class'), - ('examples/hierarchy/test_in_class_in_class.py', - 'examples/hierarchy/test_in_class_in_class.py:Tests.Test.test_in_class_in_class') -]) +@pytest.mark.parametrize( + ["test", "code_ref"], + [ + ("examples/test_simple.py", "examples/test_simple.py:test_simple"), + ( + "examples/params/test_in_class_parameterized.py", + "examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized", + ), + ("examples/hierarchy/test_in_class.py", "examples/hierarchy/test_in_class.py:Tests.test_in_class"), + ( + "examples/hierarchy/test_in_class_in_class.py", + "examples/hierarchy/test_in_class_in_class.py:Tests.Test.test_in_class_in_class", + ), + ], +) def test_code_reference(mock_client_init, test, code_ref): """Verify different tests have correct code reference. 
@@ -37,11 +45,11 @@ def test_code_reference(mock_client_init, test, code_ref): :param code_ref: an expected code reference value """ result = utils.run_pytest_tests(tests=[test]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['code_ref'] == code_ref + assert step_call_args["code_ref"] == code_ref diff --git a/tests/integration/test_config_handling.py b/tests/integration/test_config_handling.py index 1ae30e8..fe27aa8 100644 --- a/tests/integration/test_config_handling.py +++ b/tests/integration/test_config_handling.py @@ -17,14 +17,14 @@ from unittest import mock import pytest -from delayed_assert import expect, assert_expectations +from delayed_assert import assert_expectations, expect from reportportal_client import OutputType from examples.test_rp_logging import LOG_MESSAGE from tests import REPORT_PORTAL_SERVICE, REQUESTS_SERVICE from tests.helpers import utils -TEST_LAUNCH_ID = 'test_launch_id' +TEST_LAUNCH_ID = "test_launch_id" @mock.patch(REQUESTS_SERVICE) @@ -34,16 +34,16 @@ def test_rp_launch_id(mock_requests_init): :param mock_requests_init: mocked requests lib """ variables = dict() - variables['rp_launch_id'] = TEST_LAUNCH_ID + variables["rp_launch_id"] = TEST_LAUNCH_ID variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_requests = mock_requests_init.return_value assert mock_requests.post.call_count == 1 item_start = mock_requests.post.call_args_list[0] - assert item_start[0][0].endswith('/item') - assert item_start[1]['json']['launchUuid'] == TEST_LAUNCH_ID + assert item_start[0][0].endswith("/item") + assert item_start[1]["json"]["launchUuid"] == TEST_LAUNCH_ID @mock.patch(REPORT_PORTAL_SERVICE) @@ -54,17 +54,15 @@ def test_rp_parent_item_id(mock_client_init): """ parent_id = "parent_id" variables = dict() - variables['rp_parent_item_id'] = parent_id + variables["rp_parent_item_id"] = parent_id variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value - expect(mock_client.start_launch.call_count == 1, - '"start_launch" method was not called') - expect(mock_client.finish_launch.call_count == 1, - '"finish_launch" method was not called') + expect(mock_client.start_launch.call_count == 1, '"start_launch" method was not called') + expect(mock_client.finish_launch.call_count == 1, '"finish_launch" method was not called') start_call_args = mock_client.start_test_item.call_args_list finish_call_args = mock_client.finish_test_item.call_args_list @@ -82,139 +80,128 @@ def test_rp_parent_item_id_and_rp_launch_id(mock_requests_init): """ parent_id = "parent_id" variables = dict() - variables['rp_parent_item_id'] = 
parent_id - variables['rp_launch_id'] = TEST_LAUNCH_ID + variables["rp_parent_item_id"] = parent_id + variables["rp_launch_id"] = TEST_LAUNCH_ID variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_requests = mock_requests_init.return_value assert mock_requests.post.call_count == 1 item_start = mock_requests.post.call_args_list[0] - assert item_start[0][0].endswith(f'/item/{parent_id}') - assert item_start[1]['json']['launchUuid'] == TEST_LAUNCH_ID + assert item_start[0][0].endswith(f"/item/{parent_id}") + assert item_start[1]["json"]["launchUuid"] == TEST_LAUNCH_ID @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_log_format(mock_client_init): - log_format = '(%(name)s) %(message)s (%(filename)s:%(lineno)s)' - variables = {'rp_log_format': log_format} + log_format = "(%(name)s) %(message)s (%(filename)s:%(lineno)s)" + variables = {"rp_log_format": log_format} variables.update(utils.DEFAULT_VARIABLES.items()) mock_client = mock_client_init.return_value - result = utils.run_tests_with_client( - mock_client, ['examples/test_rp_logging.py'], variables=variables) + result = utils.run_tests_with_client(mock_client, ["examples/test_rp_logging.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client.log.call_count == 1) message = mock_client.log.call_args_list[0][0][1] expect(len(message) > 0) - expect(message == f'(test_rp_logging) {LOG_MESSAGE} (test_rp_logging.py:24)') + expect(message == f"(test_rp_logging) {LOG_MESSAGE} (test_rp_logging.py:24)") assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_log_batch_payload_size(mock_client_init): log_size = 123456 - variables = {'rp_log_batch_payload_size': log_size} + variables = {"rp_log_batch_payload_size": log_size} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['log_batch_payload_size'] == log_size) + expect(constructor_args["log_batch_payload_size"] == log_size) assert_expectations() def filter_agent_call(warn): - category = getattr(warn, 'category', None) + category = getattr(warn, "category", None) if category: - return category.__name__ == 'DeprecationWarning' or category.__name__ == 'RuntimeWarning' + return category.__name__ == "DeprecationWarning" or category.__name__ == "RuntimeWarning" return False def filter_agent_calls(warning_list): - return list( - filter( - lambda call: filter_agent_call(call), - warning_list - ) - ) + return list(filter(lambda call: filter_agent_call(call), warning_list)) @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_api_key(mock_client_init): - api_key = 'rp_api_key' + api_key = "rp_api_key" variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_key': api_key}.items()) + variables.update({"rp_api_key": api_key}.items()) with 
warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['api_key'] == api_key) + expect(constructor_args["api_key"] == api_key) expect(len(filter_agent_calls(w)) == 0) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_uuid(mock_client_init): - api_key = 'rp_api_key' + api_key = "rp_api_key" variables = dict(utils.DEFAULT_VARIABLES) - del variables['rp_api_key'] - variables.update({'rp_uuid': api_key}.items()) + del variables["rp_api_key"] + variables.update({"rp_uuid": api_key}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['api_key'] == api_key) + expect(constructor_args["api_key"] == api_key) expect(len(filter_agent_calls(w)) == 1) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_api_key_priority(mock_client_init): - api_key = 'rp_api_key' + api_key = "rp_api_key" variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_key': api_key, 'rp_uuid': 'rp_uuid'}.items()) + variables.update({"rp_api_key": api_key, "rp_uuid": "rp_uuid"}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['api_key'] == api_key) + expect(constructor_args["api_key"] == api_key) expect(len(filter_agent_calls(w)) == 0) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_api_key_empty(mock_client_init): - api_key = '' + api_key = "" variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_key': api_key}.items()) + variables.update({"rp_api_key": api_key}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 0) expect(len(filter_agent_calls(w)) == 1) @@ -225,16 +212,16 @@ def test_rp_api_key_empty(mock_client_init): def test_rp_api_retries(mock_client_init): retries = 5 variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_retries': str(retries)}.items()) + variables.update({"rp_api_retries": str(retries)}.items()) with warnings.catch_warnings(record=True) as w: - result = 
utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['retries'] == retries) + expect(constructor_args["retries"] == retries) expect(len(filter_agent_calls(w)) == 0) assert_expectations() @@ -243,30 +230,29 @@ def test_rp_api_retries(mock_client_init): def test_retries(mock_client_init): retries = 5 variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'retries': str(retries)}.items()) + variables.update({"retries": str(retries)}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['retries'] == retries) + expect(constructor_args["retries"] == retries) expect(len(filter_agent_calls(w)) == 1) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_issue_system_url_warning(mock_client_init): - url = 'https://bugzilla.some.com/show_bug.cgi?id={issue_id}' + url = "https://bugzilla.some.com/show_bug.cgi?id={issue_id}" variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_issue_system_url': str(url)}.items()) + variables.update({"rp_issue_system_url": str(url)}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_issue_id.py'], variables=variables) - assert int(result) == 1, 'Exit code should be 1 (test failure)' + result = utils.run_pytest_tests(["examples/test_issue_id.py"], variables=variables) + assert int(result) == 1, "Exit code should be 1 (test failure)" expect(mock_client_init.call_count == 1) expect(len(filter_agent_calls(w)) == 1) @@ -277,12 +263,12 @@ def test_rp_issue_system_url_warning(mock_client_init): def test_launch_uuid_print(mock_client_init): print_uuid = True variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_launch_uuid_print': str(print_uuid)}.items()) - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + variables.update({"rp_launch_uuid_print": str(print_uuid)}.items()) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) - expect(mock_client_init.call_args_list[0][1]['launch_uuid_print'] == print_uuid) - expect(mock_client_init.call_args_list[0][1]['print_output'] is None) + expect(mock_client_init.call_args_list[0][1]["launch_uuid_print"] == print_uuid) + expect(mock_client_init.call_args_list[0][1]["print_output"] is None) assert_expectations() @@ -290,12 +276,12 @@ def test_launch_uuid_print(mock_client_init): def test_launch_uuid_print_stderr(mock_client_init): print_uuid = True variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_launch_uuid_print': str(print_uuid), 'rp_launch_uuid_print_output': 'stderr'}.items()) - result = 
utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + variables.update({"rp_launch_uuid_print": str(print_uuid), "rp_launch_uuid_print_output": "stderr"}.items()) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) - expect(mock_client_init.call_args_list[0][1]['launch_uuid_print'] == print_uuid) - expect(mock_client_init.call_args_list[0][1]['print_output'] is OutputType.STDERR) + expect(mock_client_init.call_args_list[0][1]["launch_uuid_print"] == print_uuid) + expect(mock_client_init.call_args_list[0][1]["print_output"] is OutputType.STDERR) assert_expectations() @@ -303,43 +289,37 @@ def test_launch_uuid_print_stderr(mock_client_init): def test_launch_uuid_print_invalid_output(mock_client_init): print_uuid = True variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_launch_uuid_print': str(print_uuid), 'rp_launch_uuid_print_output': 'something'}.items()) - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 3, 'Exit code should be 3 (INTERNALERROR)' + variables.update({"rp_launch_uuid_print": str(print_uuid), "rp_launch_uuid_print_output": "something"}.items()) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 3, "Exit code should be 3 (INTERNALERROR)" assert mock_client_init.call_count == 0 @mock.patch(REPORT_PORTAL_SERVICE) def test_no_launch_uuid_print(mock_client_init): variables = utils.DEFAULT_VARIABLES.copy() - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) - expect(mock_client_init.call_args_list[0][1]['launch_uuid_print'] is False) - expect(mock_client_init.call_args_list[0][1]['print_output'] is None) + expect(mock_client_init.call_args_list[0][1]["launch_uuid_print"] is False) + expect(mock_client_init.call_args_list[0][1]["print_output"] is None) assert_expectations() @pytest.mark.parametrize( - 'connect_value, read_value, expected_result', - [ - ('5', '15', (5.0, 15.0)), - ('5.5', '15.5', (5.5, 15.5)), - (None, None, None), - (None, '5', 5), - ('5', None, 5) - ] + "connect_value, read_value, expected_result", + [("5", "15", (5.0, 15.0)), ("5.5", "15.5", (5.5, 15.5)), (None, None, None), (None, "5", 5), ("5", None, 5)], ) @mock.patch(REPORT_PORTAL_SERVICE) def test_client_timeouts(mock_client_init, connect_value, read_value, expected_result): variables = utils.DEFAULT_VARIABLES.copy() if connect_value: - variables['rp_connect_timeout'] = connect_value + variables["rp_connect_timeout"] = connect_value if read_value: - variables['rp_read_timeout'] = read_value + variables["rp_read_timeout"] = read_value - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" assert mock_client_init.call_count == 1 - assert mock_client_init.call_args_list[0][1]['http_timeout'] == expected_result + assert 
mock_client_init.call_args_list[0][1]["http_timeout"] == expected_result diff --git a/tests/integration/test_connection_close.py b/tests/integration/test_connection_close.py index 33be309..bf6cf9c 100644 --- a/tests/integration/test_connection_close.py +++ b/tests/integration/test_connection_close.py @@ -21,7 +21,7 @@ def test_connection_close(mock_client_init): mock_client = mock_client_init.return_value - result = utils.run_tests_with_client(mock_client, ['examples/test_rp_logging.py']) + result = utils.run_tests_with_client(mock_client, ["examples/test_rp_logging.py"]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" assert mock_client.close.call_count == 1, '"close" method was not called at the end of the test' diff --git a/tests/integration/test_custom_name.py b/tests/integration/test_custom_name.py index d0c1c98..16699fd 100644 --- a/tests/integration/test_custom_name.py +++ b/tests/integration/test_custom_name.py @@ -23,15 +23,18 @@ from tests.helpers import utils -@pytest.mark.parametrize('test, expected', [ - ('examples/custom_name/test_custom_name_args.py', TEST_NAME_ARGS), - ('examples/custom_name/test_custom_name_kwargs.py', TEST_NAME_KWARGS), - ('examples/custom_name/test_custom_name_empty.py', TEST_NAME_EMPTY) -]) +@pytest.mark.parametrize( + "test, expected", + [ + ("examples/custom_name/test_custom_name_args.py", TEST_NAME_ARGS), + ("examples/custom_name/test_custom_name_kwargs.py", TEST_NAME_KWARGS), + ("examples/custom_name/test_custom_name_empty.py", TEST_NAME_EMPTY), + ], +) @mock.patch(REPORT_PORTAL_SERVICE) def test_custom_attribute_report(mock_client_init, test, expected): result = utils.run_pytest_tests(tests=[test], variables=utils.DEFAULT_VARIABLES) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value start_count = mock_client.start_test_item.call_count @@ -40,5 +43,5 @@ def test_custom_attribute_report(mock_client_init, test, expected): call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[0][1] - assert step_call_args['name'] == expected, 'Incorrect item name' - assert step_call_args['attributes'] == [], 'No attributes should be added for the test item' + assert step_call_args["name"] == expected, "Incorrect item name" + assert step_call_args["attributes"] == [], "No attributes should be added for the test item" diff --git a/tests/integration/test_debug_mode.py b/tests/integration/test_debug_mode.py index d380b31..c7b6563 100644 --- a/tests/integration/test_debug_mode.py +++ b/tests/integration/test_debug_mode.py @@ -22,11 +22,7 @@ @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize(['mode', 'expected_mode'], [ - ('DEFAULT', 'DEFAULT'), - ('DEBUG', 'DEBUG'), - (None, 'DEFAULT') -]) +@pytest.mark.parametrize(["mode", "expected_mode"], [("DEFAULT", "DEFAULT"), ("DEBUG", "DEBUG"), (None, "DEFAULT")]) def test_launch_mode(mock_client_init, mode, expected_mode): """Verify different launch modes are passed to `start_launch` method. 
@@ -37,13 +33,13 @@ def test_launch_mode(mock_client_init, mode, expected_mode): """ variables = dict() if mode is not None: - variables['rp_mode'] = mode + variables["rp_mode"] = mode variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" assert mock_client_init.call_count == 1, "client wasn't initialized" init_kwargs = mock_client_init.call_args_list[0][1] - assert 'mode' in init_kwargs - assert init_kwargs['mode'] == expected_mode + assert "mode" in init_kwargs + assert init_kwargs["mode"] == expected_mode diff --git a/tests/integration/test_empty_run.py b/tests/integration/test_empty_run.py index b5e5b1a..e85b4c7 100644 --- a/tests/integration/test_empty_run.py +++ b/tests/integration/test_empty_run.py @@ -13,9 +13,10 @@ """This module includes integration tests for the empty run.""" -from delayed_assert import expect, assert_expectations from unittest import mock +from delayed_assert import assert_expectations, expect + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @@ -26,9 +27,9 @@ def test_empty_run(mock_client_init): :param mock_client_init: Pytest fixture """ - result = utils.run_pytest_tests(tests=['examples/empty/']) + result = utils.run_pytest_tests(tests=["examples/empty/"]) - assert int(result) == 5, 'Exit code should be 5 (no tests)' + assert int(result) == 5, "Exit code should be 5 (no tests)" mock_client = mock_client_init.return_value expect(mock_client.start_launch.call_count == 1, '"start_launch" method was not called') @@ -36,7 +37,7 @@ def test_empty_run(mock_client_init): assert_expectations() finish_args = mock_client.finish_launch.call_args_list - expect('status' not in finish_args[0][1], 'Launch status should not be defined') - launch_end_time = finish_args[0][1]['end_time'] - expect(launch_end_time is not None and int(launch_end_time) > 0, 'Launch end time is empty') + expect("status" not in finish_args[0][1], "Launch status should not be defined") + launch_end_time = finish_args[0][1]["end_time"] + expect(launch_end_time is not None and int(launch_end_time) > 0, "Launch end time is empty") assert_expectations() diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index d8d017c..f685f0f 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys from collections import defaultdict +from typing import Optional from unittest import mock import pytest @@ -22,13 +22,20 @@ from examples.fixtures.test_failure_fixture_teardown.conftest import ( LOG_MESSAGE_BEFORE_YIELD as LOG_MESSAGE_BEFORE_YIELD_TEST_FAILURE, - LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_TEST_FAILURE) +) +from examples.fixtures.test_failure_fixture_teardown.conftest import ( + LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_TEST_FAILURE, +) from examples.fixtures.test_fixture_return_none.conftest import LOG_MESSAGE_SETUP as LOG_MESSAGE_BEFORE_RETURN_NONE from examples.fixtures.test_fixture_setup.conftest import LOG_MESSAGE_SETUP as SINGLE_SETUP_MESSAGE from examples.fixtures.test_fixture_setup_failure.conftest import LOG_MESSAGE_SETUP as LOG_MESSAGE_SETUP_FAILURE from examples.fixtures.test_fixture_teardown.conftest import LOG_MESSAGE_BEFORE_YIELD, LOG_MESSAGE_TEARDOWN from examples.fixtures.test_fixture_teardown_failure.conftest import ( - LOG_MESSAGE_BEFORE_YIELD as LOG_MESSAGE_BEFORE_YIELD_FAILURE, LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_FAILURE) + LOG_MESSAGE_BEFORE_YIELD as LOG_MESSAGE_BEFORE_YIELD_FAILURE, +) +from examples.fixtures.test_fixture_teardown_failure.conftest import ( + LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_FAILURE, +) from examples.fixtures.test_fixture_yield_none.conftest import LOG_MESSAGE_SETUP as LOG_MESSAGE_BEFORE_YIELD_NONE from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @@ -41,9 +48,9 @@ def generate_item_id(*args, **kwargs) -> str: if args: name = args[0] else: - name = kwargs['name'] + name = kwargs["name"] ITEM_ID_DICT[name] += 1 - item_id = f'{name}_{ITEM_ID_DICT[name]}' + item_id = f"{name}_{ITEM_ID_DICT[name]}" ITEM_ID_LIST.append(item_id) return item_id @@ -52,7 +59,7 @@ def get_last_item_id() -> str: return ITEM_ID_LIST[-1] -def remove_last_item_id(*_, **__) -> str: +def remove_last_item_id(*_, **__) -> Optional[str]: if len(ITEM_ID_LIST) > 0: return ITEM_ID_LIST.pop() @@ -72,58 +79,62 @@ def setup_mock_for_logging(mock_client_init): return mock_client -@pytest.mark.parametrize('switch', [True, False]) +@pytest.mark.parametrize("switch", [True, False]) @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_on_off(mock_client_init, switch): mock_client = setup_mock(mock_client_init) variables = dict(utils.DEFAULT_VARIABLES) - variables['rp_report_fixtures'] = switch - result = utils.run_pytest_tests(tests=['examples/fixtures/test_fixture_teardown'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + variables["rp_report_fixtures"] = switch + result = utils.run_pytest_tests(tests=["examples/fixtures/test_fixture_teardown"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" start_count = mock_client.start_test_item.call_count + finish_calls = mock_client.finish_test_item.call_args_list finish_count = mock_client.finish_test_item.call_count expected_count = 3 if switch else 1 - assert start_count == finish_count == expected_count, \ - 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == expected_count, 'Incorrect number of "start_test_item" calls' + assert finish_count == expected_count, 'Incorrect number of "finish_test_item" calls' + for call in finish_calls: + assert call[1]["status"] == "PASSED" def run_tests(test_path, should_fail=False): variables = dict(utils.DEFAULT_VARIABLES) - variables['rp_report_fixtures'] = True + variables["rp_report_fixtures"] = True result = 
utils.run_pytest_tests(tests=[test_path], variables=variables) if should_fail: - assert int(result) == 1, 'Exit code should be 1 (test failure)' + assert int(result) == 1, "Exit code should be 1 (test failure)" else: - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_setup(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_setup' + test_path = "examples/fixtures/test_fixture_setup" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - step_name = f'function fixture setup: {fixture_name}' + step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - assert teardown_call_args[0] == f'function fixture teardown: {fixture_name}' + assert teardown_call_args[0] == f"function fixture teardown: {fixture_name}" setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 1, 'Incorrect number of "log" calls' @@ -133,35 +144,36 @@ def test_fixture_setup(mock_client_init): log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == SINGLE_SETUP_MESSAGE - assert log_call_kwargs['item_id'] == f'{step_name}_1' + assert log_call_kwargs["item_id"] == f"{step_name}_1" @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_teardown(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_teardown' + test_path = "examples/fixtures/test_fixture_teardown" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - setup_step_name = f'function fixture setup: {fixture_name}' + setup_step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == setup_step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - teardown_step_name = f'function fixture teardown: {fixture_name}' + teardown_step_name = f"function fixture teardown: {fixture_name}" assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 2, 
'Incorrect number of "log" calls' @@ -171,81 +183,98 @@ def test_fixture_teardown(mock_client_init): log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD - assert log_call_kwargs['item_id'] == f'{setup_step_name}_1' + assert log_call_kwargs["item_id"] == f"{setup_step_name}_1" log_call_args = log_call_args_list[-1][0] log_call_kwargs = log_call_args_list[-1][1] assert log_call_args[1] == LOG_MESSAGE_TEARDOWN - assert log_call_kwargs['item_id'] == \ - 'examples/fixtures/test_fixture_teardown/test_fixture_teardown.py::test_fixture_teardown_1' + assert ( + log_call_kwargs["item_id"] + == "examples/fixtures/test_fixture_teardown/test_fixture_teardown.py::test_fixture_teardown_1" + ) + + +FIXTURE_FAILED_MESSAGE = "function fixture setup failed: test_fixture_setup_failure_config" -@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions') @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_setup_failure(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_setup_failure' + test_path = "examples/fixtures/test_fixture_setup_failure" run_tests(test_path, True) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 2, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 2, 'Incorrect number of "start_test_item" calls' + assert finish_count == 2, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - step_name = f'function fixture setup: {fixture_name}' + step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count - assert log_count == 2, 'Incorrect number of "log" calls' + assert log_count == 4, 'Incorrect number of "log" calls' log_call_args_list = mock_client.log.call_args_list log_call_args = log_call_args_list[0][0] log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == LOG_MESSAGE_SETUP_FAILURE - assert log_call_kwargs['item_id'] == f'{step_name}_1' + assert log_call_kwargs["item_id"] == f"{step_name}_1" log_call_kwargs = log_call_args_list[1][1] + assert log_call_kwargs["message"] == FIXTURE_FAILED_MESSAGE + assert log_call_kwargs["item_id"] == f"{step_name}_1" - assert log_call_kwargs['message'].endswith( - 'examples/fixtures/test_fixture_setup_failure/conftest.py:30: Exception') - assert log_call_kwargs['item_id'] == \ - 'examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py::test_fixture_setup_failure_1' + log_call_kwargs = log_call_args_list[2][1] + assert log_call_kwargs["message"].startswith("Traceback (most recent call last):") + assert log_call_kwargs["item_id"] == f"{step_name}_1" + + log_call_kwargs = log_call_args_list[3][1] + + assert log_call_kwargs["message"].endswith( + "examples/fixtures/test_fixture_setup_failure/conftest.py:30: Exception" + ) + assert ( + log_call_kwargs["item_id"] + == "examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py::test_fixture_setup_failure_1" + ) @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_teardown_failure(mock_client_init): mock_client = 
setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_teardown_failure' + test_path = "examples/fixtures/test_fixture_teardown_failure" run_tests(test_path, True) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - setup_step_name = f'function fixture setup: {fixture_name}' + setup_step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == setup_step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - teardown_step_name = f'function fixture teardown: {fixture_name}' + teardown_step_name = f"function fixture teardown: {fixture_name}" assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 3, 'Incorrect number of "log" calls' @@ -255,51 +284,55 @@ def test_fixture_teardown_failure(mock_client_init): log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD_FAILURE - assert log_call_kwargs['item_id'] == f'{setup_step_name}_1' + assert log_call_kwargs["item_id"] == f"{setup_step_name}_1" log_call_args = log_call_args_list[1][0] log_call_kwargs = log_call_args_list[1][1] assert log_call_args[1] == LOG_MESSAGE_TEARDOWN_FAILURE - assert log_call_kwargs['item_id'] == \ - ('examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::' - 'test_fixture_teardown_failure_1') + assert log_call_kwargs["item_id"] == ( + "examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::" + "test_fixture_teardown_failure_1" + ) log_call_kwargs = log_call_args_list[2][1] - assert log_call_kwargs['message'].endswith( - 'examples/fixtures/test_fixture_teardown_failure/conftest.py:34: Exception') - assert log_call_kwargs['item_id'] == \ - ('examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::' - 'test_fixture_teardown_failure_1') + assert log_call_kwargs["message"].endswith( + "examples/fixtures/test_fixture_teardown_failure/conftest.py:34: Exception" + ) + assert log_call_kwargs["item_id"] == ( + "examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::" + "test_fixture_teardown_failure_1" + ) @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_yield_none(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_yield_none' + test_path = "examples/fixtures/test_fixture_yield_none" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list 
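    # Each call_args_list entry is a (positional args, keyword args) pair,
    # so call_args[1][0] below is the positional tuple of the second
    # "start_test_item" call and call_args[1][1] is its keyword dictionary.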
setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - setup_step_name = f'function fixture setup: {fixture_name}' + setup_step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == setup_step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - teardown_step_name = f'function fixture teardown: {fixture_name}' + teardown_step_name = f"function fixture teardown: {fixture_name}" assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 1, 'Incorrect number of "log" calls' @@ -309,35 +342,36 @@ def test_fixture_yield_none(mock_client_init): log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD_NONE - assert log_call_kwargs['item_id'] == f'{setup_step_name}_1' + assert log_call_kwargs["item_id"] == f"{setup_step_name}_1" @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_return_none(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_return_none' + test_path = "examples/fixtures/test_fixture_return_none" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - setup_step_name = f'function fixture setup: {fixture_name}' + setup_step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == setup_step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - teardown_step_name = f'function fixture teardown: {fixture_name}' + teardown_step_name = f"function fixture teardown: {fixture_name}" assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 1, 'Incorrect number of "log" calls' @@ -347,35 +381,36 @@ def test_fixture_return_none(mock_client_init): log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == LOG_MESSAGE_BEFORE_RETURN_NONE - assert log_call_kwargs['item_id'] == f'{setup_step_name}_1' + assert log_call_kwargs["item_id"] == f"{setup_step_name}_1" @mock.patch(REPORT_PORTAL_SERVICE) def test_failure_fixture_teardown(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_failure_fixture_teardown' + test_path = "examples/fixtures/test_failure_fixture_teardown" run_tests(test_path, True) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 
'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - setup_step_name = f'function fixture setup: {fixture_name}' + setup_step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == setup_step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - teardown_step_name = f'function fixture teardown: {fixture_name}' + teardown_step_name = f"function fixture teardown: {fixture_name}" assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 3, 'Incorrect number of "log" calls' @@ -385,59 +420,61 @@ def test_failure_fixture_teardown(mock_client_init): log_call_kwargs = log_call_args_list[0][1] assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD_TEST_FAILURE - assert log_call_kwargs['item_id'] == f'{setup_step_name}_1' + assert log_call_kwargs["item_id"] == f"{setup_step_name}_1" log_call_args = log_call_args_list[2][0] log_call_kwargs = log_call_args_list[2][1] assert log_call_args[1] == LOG_MESSAGE_TEARDOWN_TEST_FAILURE - assert log_call_kwargs['item_id'] == \ - ('examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::' - 'test_failure_fixture_teardown_1') + assert log_call_kwargs["item_id"] == ( + "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::" + "test_failure_fixture_teardown_1" + ) log_call_kwargs = log_call_args_list[1][1] - assert log_call_kwargs['message'].endswith( - 'examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py:28: AssertionError') - assert log_call_kwargs['item_id'] == \ - ('examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::' - 'test_failure_fixture_teardown_1') + assert log_call_kwargs["message"].endswith( + "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py:29: AssertionError" + ) + assert log_call_kwargs["item_id"] == ( + "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::" + "test_failure_fixture_teardown_1" + ) -@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions') @mock.patch(REPORT_PORTAL_SERVICE) def test_session_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) - test_path = 'examples/fixtures/session_fixture_return' + test_path = "examples/fixtures/session_fixture_return" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 4, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 4, 'Incorrect number of "start_test_item" calls' + assert finish_count == 4, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - step_name = f'session fixture setup: {fixture_name}' + step_name = f"session fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] 
teardown_call_args = call_args[-1][0] - assert teardown_call_args[0] == f'session fixture teardown: {fixture_name}' + assert teardown_call_args[0] == f"session fixture teardown: {fixture_name}" setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] -@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions') @mock.patch(REPORT_PORTAL_SERVICE) def test_package_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) - test_path = 'examples/fixtures/package_fixture_return' + test_path = "examples/fixtures/package_fixture_return" run_tests(test_path) start_count = mock_client.start_test_item.call_count @@ -447,106 +484,123 @@ def test_package_fixture_setup(mock_client_init): call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - step_name = f'package fixture setup: {fixture_name}' + step_name = f"package fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - assert teardown_call_args[0] == f'package fixture teardown: {fixture_name}' + assert teardown_call_args[0] == f"package fixture teardown: {fixture_name}" setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] -@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions') @mock.patch(REPORT_PORTAL_SERVICE) def test_module_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) - test_path = 'examples/fixtures/module_fixture_return' + test_path = "examples/fixtures/module_fixture_return" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 4, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 4, 'Incorrect number of "start_test_item" calls' + assert finish_count == 4, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - step_name = f'module fixture setup: {fixture_name}' + step_name = f"module fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] - assert teardown_call_args[0] == f'module fixture teardown: {fixture_name}' + assert teardown_call_args[0] == f"module fixture teardown: {fixture_name}" setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] -@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions') @mock.patch(REPORT_PORTAL_SERVICE) def test_class_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) - test_path = 'examples/fixtures/class_fixture_return' + test_path = "examples/fixtures/class_fixture_return" run_tests(test_path) start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 8, 'Incorrect number of 
"start_test_item" or "finish_test_item" calls' + assert start_count == 8, 'Incorrect number of "start_test_item" calls' + assert finish_count == 8, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] fixture_name = f'{test_path.split("/")[-1]}_config' - step_name = f'class fixture setup: {fixture_name}' + step_name = f"class fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] setup_call_args = call_args[-3][0] assert setup_call_args[0] == step_name setup_call_kwargs = call_args[-3][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] - teardown_step_name = f'class fixture teardown: {fixture_name}' + teardown_step_name = f"class fixture teardown: {fixture_name}" teardown_call_args = call_args[-5][0] assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-5][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] teardown_call_args = call_args[-1][0] assert teardown_call_args[0] == teardown_step_name setup_call_kwargs = call_args[-1][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_setup_skip(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) - test_path = 'examples/fixtures/test_fixture_skipped/test_fixture_skipped.py' + test_path = "examples/fixtures/test_fixture_skipped/test_fixture_skipped.py" run_tests(test_path, False) call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[2][0] - fixture_name = 'skip_fixture' - step_name = f'function fixture setup: {fixture_name}' + fixture_name = "skip_fixture" + step_name = f"function fixture setup: {fixture_name}" assert setup_call_args[0] == step_name setup_call_kwargs = call_args[2][1] - assert not setup_call_kwargs['has_stats'] + assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count assert log_count == 1, 'Incorrect number of "log" calls' call_args = mock_client.finish_test_item.call_args_list finish_call_kwargs = call_args[1][1] - assert finish_call_kwargs['status'] == 'PASSED' + assert finish_call_kwargs["status"] == "PASSED" finish_call_kwargs = call_args[-1][1] - assert finish_call_kwargs['status'] == 'SKIPPED' + assert finish_call_kwargs["status"] == "SKIPPED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_fixture_exit(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + test_path = "examples/fixtures/test_fixture_exit/test_fixture_exit.py" + variables = dict(utils.DEFAULT_VARIABLES) + variables["rp_report_fixtures"] = True + result = utils.run_pytest_tests(tests=[test_path], variables=variables) + assert int(result) == 2, "Exit code should be 2 (unexpected exit)" + + call_args = mock_client.start_test_item.call_args_list + assert len(call_args) == 2, 'Incorrect number of "start_test_item" calls' + + call_args = mock_client.finish_test_item.call_args_list + assert len(call_args) == 2, 'Incorrect number of "finish_test_item" calls' diff --git a/tests/integration/test_issue_report.py b/tests/integration/test_issue_report.py index dde357d..bdf0476 100644 --- a/tests/integration/test_issue_report.py +++ b/tests/integration/test_issue_report.py @@ -16,7 +16,7 @@ from unittest import mock import pytest -from delayed_assert import 
expect, assert_expectations +from delayed_assert import assert_expectations, expect from reportportal_client.core.rp_issues import Issue from examples import test_issue_id @@ -24,14 +24,14 @@ from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils -ISSUE_PLACEHOLDER = '{issue_id}' -ISSUE_URL_PATTERN = f'https://bugzilla.some.com/show_bug.cgi?id={ISSUE_PLACEHOLDER}' -BTS_PROJECT = 'RP-TEST' -BTS_URL = 'https://bugzilla.some.com' +ISSUE_PLACEHOLDER = "{issue_id}" +ISSUE_URL_PATTERN = f"https://bugzilla.some.com/show_bug.cgi?id={ISSUE_PLACEHOLDER}" +BTS_PROJECT = "RP-TEST" +BTS_URL = "https://bugzilla.some.com" @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize('issue_id_mark', [True, False]) +@pytest.mark.parametrize("issue_id_mark", [True, False]) def test_issue_id_attribute(mock_client_init, issue_id_mark): """Verify agent reports issue attribute if configured. @@ -42,20 +42,20 @@ def test_issue_id_attribute(mock_client_init, issue_id_mark): mock_client.start_test_item.side_effect = utils.item_id_gen mock_client.get_project_settings.side_effect = utils.project_settings - variables = {'rp_issue_id_marks': issue_id_mark} + variables = {"rp_issue_id_marks": issue_id_mark} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_issue_id.py'], variables=variables) - assert int(result) == 1, 'Exit code should be 1 (test failed)' + result = utils.run_pytest_tests(tests=["examples/test_issue_id.py"], variables=variables) + assert int(result) == 1, "Exit code should be 1 (test failed)" call_args = mock_client.start_test_item.call_args_list finish_test_step = call_args[-1][1] - attributes = finish_test_step['attributes'] + attributes = finish_test_step["attributes"] if issue_id_mark: assert len(attributes) == 1 issue_attribute = attributes[0] - expect(issue_attribute['key'] == 'issue') - expect(issue_attribute['value'] == test_issue_id.ID) + expect(issue_attribute["key"] == "issue") + expect(issue_attribute["value"] == test_issue_id.ID) assert_expectations() else: assert len(attributes) == 0 @@ -71,24 +71,25 @@ def test_issue_report(mock_client_init): mock_client.start_test_item.side_effect = utils.item_id_gen mock_client.get_project_settings.side_effect = utils.project_settings - variables = {'rp_issue_system_url': ISSUE_URL_PATTERN} + variables = {"rp_issue_system_url": ISSUE_URL_PATTERN} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_issue_id.py'], variables=variables) - assert int(result) == 1, 'Exit code should be 1 (test failed)' + result = utils.run_pytest_tests(tests=["examples/test_issue_id.py"], variables=variables) + assert int(result) == 1, "Exit code should be 1 (test failed)" call_args = mock_client.finish_test_item.call_args_list finish_test_step = call_args[0][1] - issue = finish_test_step['issue'] + issue = finish_test_step["issue"] assert isinstance(issue, Issue) - expect(issue.issue_type == 'pb001') + expect(issue.issue_type == "pb001") expect(issue.comment is not None) assert_expectations() - comments = issue.comment.split('\n') + comments = issue.comment.split("\n") assert len(comments) == 1 comment = comments[0] assert comment == "* {}: [{}]({})".format( - test_issue_id.REASON, test_issue_id.ID, ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID)) + test_issue_id.REASON, test_issue_id.ID, ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID) + ) @mock.patch(REPORT_PORTAL_SERVICE) @@ -101,21 +102,17 @@ def 
test_passed_no_issue_report(mock_client_init): mock_client.start_test_item.side_effect = utils.item_id_gen mock_client.get_project_settings.side_effect = utils.project_settings - variables = {'rp_issue_system_url': ISSUE_URL_PATTERN} + variables = {"rp_issue_system_url": ISSUE_URL_PATTERN} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_issue_id_pass.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no failures)' + result = utils.run_pytest_tests(tests=["examples/test_issue_id_pass.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no failures)" call_args = mock_client.finish_test_item.call_args_list finish_test_step = call_args[0][1] - assert 'issue' not in finish_test_step or finish_test_step['issue'] is None + assert "issue" not in finish_test_step or finish_test_step["issue"] is None -@pytest.mark.parametrize(('flag_value', 'expected_issue'), [ - (True, None), - (False, NOT_ISSUE), - (None, None) -]) +@pytest.mark.parametrize(("flag_value", "expected_issue"), [(True, None), (False, NOT_ISSUE), (None, None)]) @mock.patch(REPORT_PORTAL_SERVICE) def test_skipped_not_issue(mock_client_init, flag_value, expected_issue): """Verify 'rp_is_skipped_an_issue' option handling. @@ -129,15 +126,15 @@ def test_skipped_not_issue(mock_client_init, flag_value, expected_issue): variables = dict() if flag_value is not None: - variables['rp_is_skipped_an_issue'] = flag_value + variables["rp_is_skipped_an_issue"] = flag_value variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/skip/test_simple_skip.py'], variables=variables) + result = utils.run_pytest_tests(tests=["examples/skip/test_simple_skip.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no failures)' + assert int(result) == 0, "Exit code should be 0 (no failures)" call_args = mock_client.finish_test_item.call_args_list finish_test_step = call_args[0][1] - actual_issue = finish_test_step.get('issue', None) + actual_issue = finish_test_step.get("issue", None) assert actual_issue == expected_issue @@ -152,18 +149,18 @@ def test_skipped_custom_issue(mock_client_init): mock_client.get_project_settings.side_effect = utils.project_settings variables = dict() - variables['rp_is_skipped_an_issue'] = True - variables['rp_issue_system_url'] = ISSUE_URL_PATTERN + variables["rp_is_skipped_an_issue"] = True + variables["rp_issue_system_url"] = ISSUE_URL_PATTERN variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/skip/test_skip_issue.py'], variables=variables) + result = utils.run_pytest_tests(tests=["examples/skip/test_skip_issue.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no failures)' + assert int(result) == 0, "Exit code should be 0 (no failures)" call_args = mock_client.finish_test_item.call_args_list finish_test_step = call_args[0][1] - actual_issue = finish_test_step.get('issue', None) + actual_issue = finish_test_step.get("issue", None) assert isinstance(actual_issue, Issue) - expect(actual_issue.issue_type == 'pb001') + expect(actual_issue.issue_type == "pb001") expect(actual_issue.comment is not None) assert_expectations() @@ -178,28 +175,24 @@ def test_external_issue(mock_client_init): mock_client.start_test_item.side_effect = utils.item_id_gen mock_client.get_project_settings.side_effect = utils.project_settings - variables = { - 'rp_bts_project': BTS_PROJECT, - 'rp_bts_url': 
BTS_URL, - 'rp_bts_issue_url': ISSUE_URL_PATTERN - } + variables = {"rp_bts_project": BTS_PROJECT, "rp_bts_url": BTS_URL, "rp_bts_issue_url": ISSUE_URL_PATTERN} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_issue_id.py'], variables=variables) + result = utils.run_pytest_tests(tests=["examples/test_issue_id.py"], variables=variables) - assert int(result) == 1, 'Exit code should be 1 (test failed)' + assert int(result) == 1, "Exit code should be 1 (test failed)" call_args = mock_client.finish_test_item.call_args_list finish_test_step = call_args[0][1] - actual_issue = finish_test_step.get('issue', None) + actual_issue = finish_test_step.get("issue", None) assert isinstance(actual_issue, Issue) - expect(actual_issue.issue_type == 'pb001') + expect(actual_issue.issue_type == "pb001") expect(actual_issue.comment is not None) external_issues = actual_issue._external_issues expect(len(external_issues) == 1) assert_expectations() external_issue = external_issues[0] - expect(external_issue['btsUrl'] == BTS_URL) - expect(external_issue['btsProject'] == BTS_PROJECT) - expect(external_issue['ticketId'] == test_issue_id.ID) - expect(external_issue['url'] == ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID)) + expect(external_issue["btsUrl"] == BTS_URL) + expect(external_issue["btsProject"] == BTS_PROJECT) + expect(external_issue["ticketId"] == test_issue_id.ID) + expect(external_issue["url"] == ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID)) assert_expectations() diff --git a/tests/integration/test_max_name_length.py b/tests/integration/test_max_name_length.py index 11a1b9f..51430db 100644 --- a/tests/integration/test_max_name_length.py +++ b/tests/integration/test_max_name_length.py @@ -20,8 +20,8 @@ @mock.patch(REPORT_PORTAL_SERVICE) def test_custom_attribute_report(mock_client_init): - result = utils.run_pytest_tests(tests=['examples/test_max_item_name.py'], variables=utils.DEFAULT_VARIABLES) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/test_max_item_name.py"], variables=utils.DEFAULT_VARIABLES) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value start_count = mock_client.start_test_item.call_count @@ -30,4 +30,4 @@ def test_custom_attribute_report(mock_client_init): call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[0][1] - assert len(step_call_args['name']) == 1024, 'Incorrect item name length' + assert len(step_call_args["name"]) == 1024, "Incorrect item name length" diff --git a/tests/integration/test_parameters_report.py b/tests/integration/test_parameters_report.py index e4a7cba..b5ac571 100644 --- a/tests/integration/test_parameters_report.py +++ b/tests/integration/test_parameters_report.py @@ -13,22 +13,28 @@ """This module includes integration tests for parameters report.""" -import pytest from unittest import mock +import pytest + from examples.params.test_binary_symbol_in_parameters import BINARY_TEXT from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize(['test', 'expected_params'], [ - ('examples/test_simple.py', None), - ('examples/params/test_in_class_parameterized.py', {'param': 'param'}), - ('examples/params/test_different_parameter_types.py', - {'integer': 1, 'floating_point': 1.5, 'boolean': True, 'none': None}), - 
('examples/params/test_binary_symbol_in_parameters.py', {'text': BINARY_TEXT.replace('\0', '\\0')}), -]) +@pytest.mark.parametrize( + ["test", "expected_params"], + [ + ("examples/test_simple.py", None), + ("examples/params/test_in_class_parameterized.py", {"param": "param"}), + ( + "examples/params/test_different_parameter_types.py", + {"integer": 1, "floating_point": 1.5, "boolean": True, "none": None}, + ), + ("examples/params/test_binary_symbol_in_parameters.py", {"text": BINARY_TEXT.replace("\0", "\\0")}), + ], +) def test_parameters(mock_client_init, test, expected_params): """Verify different tests have correct parameters. @@ -37,11 +43,11 @@ def test_parameters(mock_client_init, test, expected_params): :param expected_params: an expected parameter dictionary """ result = utils.run_pytest_tests(tests=[test]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['parameters'] == expected_params + assert step_call_args["parameters"] == expected_params diff --git a/tests/integration/test_pass_failed_skipped.py b/tests/integration/test_pass_failed_skipped.py index f69c4e7..97bb4ad 100644 --- a/tests/integration/test_pass_failed_skipped.py +++ b/tests/integration/test_pass_failed_skipped.py @@ -16,18 +16,20 @@ from unittest import mock import pytest -from delayed_assert import expect, assert_expectations +from delayed_assert import assert_expectations, expect from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils -@pytest.mark.parametrize(('test', 'expected_run_status', 'expected_item_status'), [ - ('examples/test_simple.py', 0, 'PASSED'), - ('examples/test_simple_fail.py', 1, 'FAILED'), - ('examples/skip/test_simple_skip.py', 0, - 'SKIPPED') -]) +@pytest.mark.parametrize( + ("test", "expected_run_status", "expected_item_status"), + [ + ("examples/test_simple.py", 0, "PASSED"), + ("examples/test_simple_fail.py", 1, "FAILED"), + ("examples/skip/test_simple_skip.py", 0, "SKIPPED"), + ], +) @mock.patch(REPORT_PORTAL_SERVICE) def test_simple_tests(mock_client_init, test, expected_run_status, expected_item_status): """Verify a simple test creates correct structure and finishes all items. 
@@ -43,25 +45,27 @@ def test_simple_tests(mock_client_init, test, expected_run_status, expected_item mock_client.start_test_item.side_effect = utils.item_id_gen result = utils.run_pytest_tests(tests=[test]) - assert int(result) == expected_run_status, 'Exit code should be ' + str(expected_run_status) + assert int(result) == expected_run_status, "Exit code should be " + str(expected_run_status) start_call_args = mock_client.start_test_item.call_args_list finish_call_args = mock_client.finish_test_item.call_args_list - assert len(start_call_args) == len(finish_call_args), 'Number of started items should be equal to finished items' + assert len(start_call_args) == len(finish_call_args), "Number of started items should be equal to finished items" for i in range(len(start_call_args)): start_test_step = start_call_args[-1 - i][1] finish_test_step = finish_call_args[i][1] - expect(finish_test_step['item_id'].startswith(start_test_step['name'])) + expect(finish_test_step["item_id"].startswith(start_test_step["name"])) if i == 0: - actual_status = finish_test_step['status'] - expect(actual_status == expected_item_status, - f'Invalid item status, actual "{actual_status}", expected: "{expected_item_status}"') + actual_status = finish_test_step["status"] + expect( + actual_status == expected_item_status, + f'Invalid item status, actual "{actual_status}", expected: "{expected_item_status}"', + ) finish_launch_call_args = mock_client.finish_launch.call_args_list expect(len(finish_launch_call_args) == 1) - expect('end_time' in finish_launch_call_args[0][1]) - expect(finish_launch_call_args[0][1]['end_time'] is not None) - expect('status' not in finish_launch_call_args[0][1]) + expect("end_time" in finish_launch_call_args[0][1]) + expect(finish_launch_call_args[0][1]["end_time"] is not None) + expect("status" not in finish_launch_call_args[0][1]) assert_expectations() diff --git a/tests/integration/test_pytest_parallel.py b/tests/integration/test_pytest_parallel.py index 7007882..9c94a9b 100644 --- a/tests/integration/test_pytest_parallel.py +++ b/tests/integration/test_pytest_parallel.py @@ -13,17 +13,18 @@ """This module includes integration tests for "pytest_parallel" plugin.""" -import pytest -from delayed_assert import expect, assert_expectations from unittest import mock +import pytest +from delayed_assert import assert_expectations, expect + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils from tests.helpers.utils import item_id_gen @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.skip(reason='This test breaks all other tests, so only for local execution') +@pytest.mark.skip(reason="This test breaks all other tests, so only for local execution") def test_pytest_parallel_threads(mock_client_init): """Verify "pytest_parallel" plugin run tests in two threads. 
@@ -32,8 +33,8 @@ def test_pytest_parallel_threads(mock_client_init): mock_client = mock_client_init.return_value mock_client.start_test_item.side_effect = item_id_gen - result = utils.run_pytest_tests(tests=['examples/hierarchy'], args=['--tests-per-worker', '2']) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/hierarchy"], args=["--tests-per-worker", "2"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value @@ -42,7 +43,7 @@ def test_pytest_parallel_threads(mock_client_init): assert_expectations() finish_args = mock_client.finish_launch.call_args_list - expect(finish_args[0][1]['status'] in ('PASSED', None), 'Launch failed') - launch_end_time = finish_args[0][1]['end_time'] - expect(launch_end_time is not None and int(launch_end_time) > 0, 'Launch end time is empty') + expect(finish_args[0][1]["status"] in ("PASSED", None), "Launch failed") + launch_end_time = finish_args[0][1]["end_time"] + expect(launch_end_time is not None and int(launch_end_time) > 0, "Launch end time is empty") assert_expectations() diff --git a/tests/integration/test_suite_hierarchy.py b/tests/integration/test_suite_hierarchy.py index 248b47f..6ac1498 100644 --- a/tests/integration/test_suite_hierarchy.py +++ b/tests/integration/test_suite_hierarchy.py @@ -13,29 +13,30 @@ """This module includes integration tests for different suite hierarchy.""" -import pytest from unittest import mock +import pytest + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils from tests.integration import HIERARCHY_TEST_PARAMETERS def verify_start_item_parameters(mock_client, expected_items): - assert mock_client.start_test_item.call_count == len(expected_items), \ - '"start_test_item" method was called incorrect number of times' + assert mock_client.start_test_item.call_count == len( + expected_items + ), '"start_test_item" method was called incorrect number of times' call_args = mock_client.start_test_item.call_args_list for i, call in enumerate(call_args): start_kwargs = call[1] - assert start_kwargs['name'] == expected_items[i]['name'] - assert start_kwargs['item_type'] == expected_items[i]['item_type'] - verification = expected_items[i]['parent_item_id'] - assert verification(start_kwargs['parent_item_id']) + assert start_kwargs["name"] == expected_items[i]["name"] + assert start_kwargs["item_type"] == expected_items[i]["item_type"] + verification = expected_items[i]["parent_item_id"] + assert verification(start_kwargs["parent_item_id"]) -@pytest.mark.parametrize(('test', 'variables', 'expected_items'), - HIERARCHY_TEST_PARAMETERS) +@pytest.mark.parametrize(("test", "variables", "expected_items"), HIERARCHY_TEST_PARAMETERS) @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_hierarchy_parameters(mock_client_init, test, variables, expected_items): """Verify suite hierarchy with `rp_hierarchy_dirs=True`. 
@@ -46,6 +47,6 @@ def test_rp_hierarchy_parameters(mock_client_init, test, variables, expected_ite mock_client.start_test_item.side_effect = utils.item_id_gen result = utils.run_pytest_tests(tests=test, variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" verify_start_item_parameters(mock_client, expected_items) diff --git a/tests/integration/test_threads_logs.py b/tests/integration/test_threads_logs.py index d6814f4..ae19161 100644 --- a/tests/integration/test_threads_logs.py +++ b/tests/integration/test_threads_logs.py @@ -28,17 +28,14 @@ def test_rp_thread_logs_reporting(mock_client_init): def init_thread_client(*_, **__): from reportportal_client import set_current + set_current(mock_thread_client) return mock_thread_client mock_client.clone.side_effect = init_thread_client - result = utils.run_tests_with_client( - mock_client, - ['examples/threads/'], - args=["--rp-thread-logging"] - ) + result = utils.run_tests_with_client(mock_client, ["examples/threads/"], args=["--rp-thread-logging"]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" assert mock_client.start_launch.call_count == 1, '"start_launch" method was not called' assert mock_client.log.call_count == 1 assert mock_thread_client.log.call_count == 2 diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 1248b81..28cdb51 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -13,26 +13,27 @@ """This module contains common Pytest fixtures and hooks for unit tests.""" +# noinspection PyUnresolvedReferences +from unittest import mock + import py from _pytest.config import Config from _pytest.main import Session from pluggy._tracing import TagTracer -from pytest import fixture, Module -# noinspection PyUnresolvedReferences -from unittest import mock - +from pytest import Module, fixture from reportportal_client import RPLogger + from pytest_reportportal.config import AgentConfig -from pytest_reportportal.service import PyTestServiceClass +from pytest_reportportal.service import PyTestService from tests import REPORT_PORTAL_SERVICE -ITEM_PATH = py.path.local('examples/test_simple.py') +ITEM_PATH = py.path.local("examples/test_simple.py") @fixture def logger(): """Prepare instance of the RPLogger for testing.""" - return RPLogger('pytest_reportportal.test') + return RPLogger("pytest_reportportal.test") @fixture() @@ -40,41 +41,35 @@ def mocked_config(): """Mock Pytest config for testing.""" mocked_config = mock.create_autospec(Config) - mocked_config.getoption_side_effects = { - '--collect-only': False, - '--setup-plan': False, - 'rp_log_level': 'debug' - } + mocked_config.getoption_side_effects = {"--collect-only": False, "--setup-plan": False, "rp_log_level": "debug"} def getoption_side_effect(name, default=None): - return mocked_config.getoption_side_effects.get( - name, default if default else mock.Mock() - ) + return mocked_config.getoption_side_effects.get(name, default if default else mock.Mock()) mocked_config._reporter_config = mock.Mock() mocked_config.getoption.side_effect = getoption_side_effect mocked_config._rp_enabled = True - mocked_config.rootdir = py.path.local('/path/to') - mocked_config.trace = TagTracer().get('root') + mocked_config.rootdir = py.path.local("/path/to") + mocked_config.trace = TagTracer().get("root") mocked_config.pluginmanager = mock.Mock() mocked_config.option = mock.create_autospec(Config) - 
mocked_config.option.rp_project = 'default_personal' - mocked_config.option.rp_endpoint = 'http://docker.local:8080/' + mocked_config.option.rp_project = "default_personal" + mocked_config.option.rp_endpoint = "http://docker.local:8080/" mocked_config.option.rp_api_key = mock.sentinel.rp_api_key mocked_config.option.rp_log_batch_size = -1 mocked_config.option.retries = -1 - mocked_config.option.rp_hierarchy_dirs_level = '0' + mocked_config.option.rp_hierarchy_dirs_level = "0" mocked_config.option.rp_rerun = False mocked_config.option.rp_launch_timeout = -1 mocked_config.option.rp_thread_logging = True - mocked_config.option.rp_launch_uuid_print = 'False' - mocked_config.option.rp_launch_uuid_print_output = 'STDOUT' - mocked_config.option.rp_client_type = 'SYNC' - mocked_config.option.rp_report_fixtures = 'False' - mocked_config.option.rp_hierarchy_code = 'False' - mocked_config.option.rp_hierarchy_dirs = 'False' - mocked_config.option.rp_hierarchy_test_file = 'True' - mocked_config.option.rp_skip_connection_test = 'False' + mocked_config.option.rp_launch_uuid_print = "False" + mocked_config.option.rp_launch_uuid_print_output = "STDOUT" + mocked_config.option.rp_client_type = "SYNC" + mocked_config.option.rp_report_fixtures = "False" + mocked_config.option.rp_hierarchy_code = "False" + mocked_config.option.rp_hierarchy_dirs = "False" + mocked_config.option.rp_hierarchy_test_file = "True" + mocked_config.option.rp_skip_connection_test = "False" return mocked_config @@ -91,7 +86,7 @@ def mocked_module(mocked_session): """Mock Pytest Module for testing.""" mocked_module = mock.create_autospec(Module) mocked_module.parent = mocked_session - mocked_module.name = 'module' + mocked_module.name = "module" mocked_module.fspath = ITEM_PATH return mocked_module @@ -102,7 +97,7 @@ def mocked_item(mocked_session, mocked_module): test_item = mock.Mock() test_item.session = mocked_session test_item.fspath = ITEM_PATH - name = 'test_item' + name = "test_item" test_item.name = name test_item.originalname = name test_item.parent = mocked_module @@ -111,8 +106,8 @@ def mocked_item(mocked_session, mocked_module): @fixture() def rp_service(mocked_config): - """Prepare instance of the PyTestServiceClass for testing.""" - service = PyTestServiceClass(AgentConfig(mocked_config)) - with mock.patch(REPORT_PORTAL_SERVICE + '.get_project_settings'): + """Prepare instance of the PyTestService for testing.""" + service = PyTestService(AgentConfig(mocked_config)) + with mock.patch(REPORT_PORTAL_SERVICE + ".get_project_settings"): service.start() return service diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 900a840..bf70947 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -12,25 +12,25 @@ # limitations under the License import pytest + from pytest_reportportal.config import AgentConfig @pytest.mark.parametrize( - ['verify_ssl', 'expected_result'], + ["verify_ssl", "expected_result"], [ - ('True', True), - ('False', False), - ('true', True), - ('false', False), + ("True", True), + ("False", False), + ("true", True), + ("false", False), (True, True), (False, False), - ('path/to/certificate', 'path/to/certificate'), - (None, True) - ] + ("path/to/certificate", "path/to/certificate"), + (None, True), + ], ) def test_verify_ssl_true(mocked_config, verify_ssl, expected_result): - mocked_config.getini.side_effect = \ - lambda x: verify_ssl if x == 'rp_verify_ssl' else None + mocked_config.getini.side_effect = lambda x: verify_ssl if x == "rp_verify_ssl" else None config = 
AgentConfig(mocked_config) assert config.rp_verify_ssl == expected_result diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py index 85745fa..9fc77a6 100644 --- a/tests/unit/test_plugin.py +++ b/tests/unit/test_plugin.py @@ -18,65 +18,64 @@ import pytest from _pytest.config.argparsing import Parser -from delayed_assert import expect, assert_expectations +from delayed_assert import assert_expectations, expect from reportportal_client.errors import ResponseError from requests.exceptions import RequestException from pytest_reportportal.config import AgentConfig from pytest_reportportal.plugin import ( + FAILED_LAUNCH_WAIT, + LOGGER, + MANDATORY_PARAMETER_MISSED_PATTERN, is_control, - log, pytest_addoption, - pytest_configure, pytest_collection_finish, - pytest_sessionstart, + pytest_configure, pytest_sessionfinish, + pytest_sessionstart, wait_launch, - MANDATORY_PARAMETER_MISSED_PATTERN, FAILED_LAUNCH_WAIT ) -from pytest_reportportal.service import PyTestServiceClass +from pytest_reportportal.service import PyTestService def test_is_control(mocked_config): """Test is_master() function for the correct responses.""" mocked_config.workerinput = None expect(is_control(mocked_config) is False) - delattr(mocked_config, 'workerinput') + delattr(mocked_config, "workerinput") expect(is_control(mocked_config) is True) assert_expectations() -@mock.patch('reportportal_client.logs.RPLogger.handle') -@pytest.mark.parametrize('log_level', ('info', 'debug', 'warning', 'error')) +@mock.patch("reportportal_client.logs.RPLogger.handle") +@pytest.mark.parametrize("log_level", ("info", "debug", "warning", "error")) def test_logger_handle_attachment(mock_handler, logger, log_level): """Test logger call for different log levels with some text attachment.""" log_call = getattr(logger, log_level) - attachment = 'Some {} attachment'.format(log_level) + attachment = "Some {} attachment".format(log_level) log_call("Some {} message".format(log_level), attachment=attachment) - expect(mock_handler.call_count == 1, - 'logger.handle called more than 1 time') - expect(getattr(mock_handler.call_args[0][0], 'attachment') == attachment, - "record.attachment in args doesn't match real value") + expect(mock_handler.call_count == 1, "logger.handle called more than 1 time") + expect( + getattr(mock_handler.call_args[0][0], "attachment") == attachment, + "record.attachment in args doesn't match real value", + ) assert_expectations() -@mock.patch('reportportal_client.logs.RPLogger.handle') -@pytest.mark.parametrize('log_level', ('info', 'debug', 'warning', 'error')) +@mock.patch("reportportal_client.logs.RPLogger.handle") +@pytest.mark.parametrize("log_level", ("info", "debug", "warning", "error")) def test_logger_handle_no_attachment(mock_handler, logger, log_level): """Test logger call for different log levels without any attachment.""" log_call = getattr(logger, log_level) - log_call('Some {} message'.format(log_level)) - expect(mock_handler.call_count == 1, - 'logger.handle called more than 1 time') - expect(getattr(mock_handler.call_args[0][0], 'attachment') is None, - 'record.attachment in args is not None') + log_call("Some {} message".format(log_level)) + expect(mock_handler.call_count == 1, "logger.handle called more than 1 time") + expect(getattr(mock_handler.call_args[0][0], "attachment") is None, "record.attachment in args is not None") assert_expectations() -@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock()) -@mock.patch('pytest_reportportal.plugin.PyTestServiceClass') -def 
test_portal_on_maintenance(mocked_service_class, mocked_config, - mocked_session): +@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) +@mock.patch("pytest_reportportal.plugin.PyTestService") +def test_portal_on_maintenance(mocked_service_class, mocked_config, mocked_session): """Test session configuration if RP is in maintenance mode. :param mocked_session: pytest fixture @@ -86,14 +85,13 @@ def test_portal_on_maintenance(mocked_service_class, mocked_config, mocked_service = mocked_service_class.return_value mocked_config.py_test_service = mocked_service - mocked_service.start.side_effect = \ - ResponseError("Report Portal - Maintenance") + mocked_service.start.side_effect = ResponseError("Report Portal - Maintenance") pytest_sessionstart(mocked_session) assert mocked_config.py_test_service.rp is None -@mock.patch('pytest_reportportal.plugin.requests.Session.get', mock.Mock()) -@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock()) +@mock.patch("pytest_reportportal.plugin.requests.Session.get", mock.Mock()) +@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) def test_pytest_configure(mocked_config): """Test plugin successful configuration. @@ -103,18 +101,14 @@ def test_pytest_configure(mocked_config): mocked_config.option.rp_project = None pytest_configure(mocked_config) expect(mocked_config._rp_enabled is True) - expect( - lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass)) + expect(lambda: isinstance(mocked_config.py_test_service, PyTestService)) assert_expectations() mocked_config.getoption.assert_has_calls( - [ - mock.call('--collect-only', default=False), - mock.call('--setup-plan', default=False) - ] + [mock.call("--collect-only", default=False), mock.call("--setup-plan", default=False)] ) -@mock.patch('pytest_reportportal.plugin.requests.get') +@mock.patch("pytest_reportportal.plugin.requests.get") def test_pytest_configure_dry_run(mocked_config): """Test plugin configuration in case of dry-run execution.""" mocked_config.getoption.return_value = True @@ -122,8 +116,8 @@ def test_pytest_configure_dry_run(mocked_config): assert mocked_config._rp_enabled is False -@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock()) -@mock.patch('pytest_reportportal.plugin.log', wraps=log) +@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_endpoint. @@ -145,14 +139,15 @@ def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config): mocked_config.option.rp_project, None, mocked_config.option.rp_api_key, - )), - mock.call('Disabling reporting to RP.'), + ) + ), + mock.call("Disabling reporting to RP."), ] ) -@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock()) -@mock.patch('pytest_reportportal.plugin.log', wraps=log) +@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_project. 
@@ -174,14 +169,15 @@ def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config): None, mocked_config.option.rp_endpoint, mocked_config.option.rp_api_key, - )), - mock.call('Disabling reporting to RP.'), + ) + ), + mock.call("Disabling reporting to RP."), ] ) -@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock()) -@mock.patch('pytest_reportportal.plugin.log', wraps=log) +@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) def test_pytest_configure_misssing_rp_uuid(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_uuid. @@ -203,13 +199,14 @@ def test_pytest_configure_misssing_rp_uuid(mocked_log, mocked_config): mocked_config.option.rp_project, mocked_config.option.rp_endpoint, None, - )), - mock.call('Disabling reporting to RP.'), + ) + ), + mock.call("Disabling reporting to RP."), ] ) -@mock.patch('pytest_reportportal.plugin.requests.get') +@mock.patch("pytest_reportportal.plugin.requests.get") def test_pytest_configure_on_conn_error(mocked_get, mocked_config): """Test plugin configuration in case of HTTP error. @@ -223,13 +220,13 @@ def test_pytest_configure_on_conn_error(mocked_get, mocked_config): mock_response.raise_for_status.side_effect = RequestException() mocked_get.return_value = mock_response mocked_config.option.rp_enabled = True - mocked_config.option.rp_skip_connection_test = 'False' + mocked_config.option.rp_skip_connection_test = "False" pytest_configure(mocked_config) assert mocked_config._rp_enabled is False -@mock.patch('pytest_reportportal.plugin.LAUNCH_WAIT_TIMEOUT', 1) -@mock.patch('pytest_reportportal.plugin.time') +@mock.patch("pytest_reportportal.plugin.LAUNCH_WAIT_TIMEOUT", 1) +@mock.patch("pytest_reportportal.plugin.time") def test_wait_launch(time_mock): """Test wait_launch() function for the correct behavior.""" time_mock.time.side_effect = [0, 1, 2] @@ -245,13 +242,11 @@ def test_pytest_collection_finish(mocked_session): """ mocked_session.config.py_test_service = mock.Mock() pytest_collection_finish(mocked_session) - mocked_session.config.py_test_service.collect_tests. \ - assert_called_with(mocked_session) + mocked_session.config.py_test_service.collect_tests.assert_called_with(mocked_session) -@mock.patch('pytest_reportportal.plugin.wait_launch', - mock.Mock(return_value=True)) -@mock.patch('pytest_reportportal.plugin.is_control', mock.Mock()) +@mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock(return_value=True)) +@mock.patch("pytest_reportportal.plugin.is_control", mock.Mock()) def test_pytest_sessionstart(mocked_session): """Test session configuration if RP plugin is correctly configured. @@ -270,18 +265,16 @@ def test_pytest_sessionstart(mocked_session): assert_expectations() -@mock.patch('pytest_reportportal.plugin.log', wraps=log) -@mock.patch('pytest_reportportal.plugin.is_control', mock.Mock()) -@mock.patch('pytest_reportportal.plugin.wait_launch', - mock.Mock(return_value=False)) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) +@mock.patch("pytest_reportportal.plugin.is_control", mock.Mock()) +@mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock(return_value=False)) def test_pytest_sessionstart_launch_wait_fail(mocked_log, mocked_session): """Test session configuration if RP plugin is correctly configured. 
:param mocked_session: pytest fixture """ mocked_session.config.pluginmanager.hasplugin.return_value = True - mocked_session.config._reporter_config = mock.Mock( - spec=AgentConfig(mocked_session.config)) + mocked_session.config._reporter_config = mock.Mock(spec=AgentConfig(mocked_session.config)) mocked_session.config._reporter_config.rp_launch_attributes = [] mocked_session.config._reporter_config.rp_launch_id = None mocked_session.config.py_test_service = mock.Mock() @@ -289,14 +282,10 @@ def test_pytest_sessionstart_launch_wait_fail(mocked_log, mocked_session): expect(lambda: mocked_session.config.py_test_service.rp is None) expect(lambda: mocked_session.config._rp_enabled is False) assert_expectations() - mocked_log.error.assert_has_calls( - [ - mock.call(FAILED_LAUNCH_WAIT) - ] - ) + mocked_log.error.assert_has_calls([mock.call(FAILED_LAUNCH_WAIT)]) -@mock.patch('pytest_reportportal.plugin.wait_launch', mock.Mock()) +@mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock()) def test_pytest_sessionstart_xdist(mocked_session): """Test session configuration if it's worker xdist node. @@ -313,7 +302,7 @@ def test_pytest_sessionstart_xdist(mocked_session): assert_expectations() -@mock.patch('pytest_reportportal.plugin.is_control', mock.Mock()) +@mock.patch("pytest_reportportal.plugin.is_control", mock.Mock()) def test_pytest_sessionfinish(mocked_session): """Test sessionfinish with the configured RP plugin. @@ -329,47 +318,47 @@ def test_pytest_addoption_adds_correct_ini_file_arguments(): """Test the correct list of options are available in the .ini file.""" mock_parser = mock.MagicMock(spec=Parser) expected_argument_names = ( - 'rp_launch', - 'rp_launch_id', - 'rp_launch_description', - 'rp_project', - 'rp_log_level', - 'rp_log_format', - 'rp_rerun', - 'rp_rerun_of', - 'rp_parent_item_id', - 'rp_uuid', - 'rp_api_key', - 'rp_endpoint', - 'rp_mode', - 'rp_thread_logging', - 'rp_launch_uuid_print', - 'rp_launch_uuid_print_output', - 'rp_launch_attributes', - 'rp_tests_attributes', - 'rp_log_batch_size', - 'rp_log_batch_payload_size', - 'rp_ignore_attributes', - 'rp_is_skipped_an_issue', - 'rp_hierarchy_code', - 'rp_hierarchy_dirs_level', - 'rp_hierarchy_dirs', - 'rp_hierarchy_dir_path_separator', - 'rp_hierarchy_test_file', - 'rp_issue_system_url', - 'rp_bts_issue_url', - 'rp_bts_project', - 'rp_bts_url', - 'rp_verify_ssl', - 'rp_issue_id_marks', - 'retries', - 'rp_api_retries', - 'rp_skip_connection_test', - 'rp_launch_timeout', - 'rp_client_type', - 'rp_connect_timeout', - 'rp_read_timeout', - 'rp_report_fixtures' + "rp_launch", + "rp_launch_id", + "rp_launch_description", + "rp_project", + "rp_log_level", + "rp_log_format", + "rp_rerun", + "rp_rerun_of", + "rp_parent_item_id", + "rp_uuid", + "rp_api_key", + "rp_endpoint", + "rp_mode", + "rp_thread_logging", + "rp_launch_uuid_print", + "rp_launch_uuid_print_output", + "rp_launch_attributes", + "rp_tests_attributes", + "rp_log_batch_size", + "rp_log_batch_payload_size", + "rp_ignore_attributes", + "rp_is_skipped_an_issue", + "rp_hierarchy_code", + "rp_hierarchy_dirs_level", + "rp_hierarchy_dirs", + "rp_hierarchy_dir_path_separator", + "rp_hierarchy_test_file", + "rp_issue_system_url", + "rp_bts_issue_url", + "rp_bts_project", + "rp_bts_url", + "rp_verify_ssl", + "rp_issue_id_marks", + "retries", + "rp_api_retries", + "rp_skip_connection_test", + "rp_launch_timeout", + "rp_client_type", + "rp_connect_timeout", + "rp_read_timeout", + "rp_report_fixtures", ) pytest_addoption(mock_parser) @@ -383,23 +372,23 @@ def 
test_pytest_addoption_adds_correct_ini_file_arguments(): def test_pytest_addoption_adds_correct_command_line_arguments(): """Test the correct list of options are available in the command line.""" expected_argument_names = ( - '--reportportal', - '--rp-launch', - '--rp-launch-id', - '--rp-launch-description', - '--rp-project', - '--rp-log-level', - '--rp-log-format', - '--rp-rerun', - '--rp-rerun-of', - '--rp-parent-item-id', - '--rp-uuid', - '--rp-api-key', - '--rp-endpoint', - '--rp-mode', - '--rp-thread-logging', - '--rp-launch-uuid-print', - '--rp-launch-uuid-print-output' + "--reportportal", + "--rp-launch", + "--rp-launch-id", + "--rp-launch-description", + "--rp-project", + "--rp-log-level", + "--rp-log-format", + "--rp-rerun", + "--rp-rerun-of", + "--rp-parent-item-id", + "--rp-uuid", + "--rp-api-key", + "--rp-endpoint", + "--rp-mode", + "--rp-thread-logging", + "--rp-launch-uuid-print", + "--rp-launch-uuid-print-output", ) mock_parser = mock.MagicMock(spec=Parser) mock_reporting_group = mock_parser.getgroup.return_value diff --git a/tests/unit/test_service.py b/tests/unit/test_service.py index 8bfeb54..286a4c4 100644 --- a/tests/unit/test_service.py +++ b/tests/unit/test_service.py @@ -13,16 +13,16 @@ """This module includes unit tests for the service.py module.""" -from delayed_assert import expect, assert_expectations +from delayed_assert import assert_expectations, expect def test_get_item_parameters(mocked_item, rp_service): """Test that parameters are returned in a way supported by the client.""" - mocked_item.callspec.params = {'param': 'param_value'} + mocked_item.callspec.params = {"param": "param_value"} - expect(rp_service._get_parameters(mocked_item) == {'param': 'param_value'}) + expect(rp_service._get_parameters(mocked_item) == {"param": "param_value"}) - delattr(mocked_item, 'callspec') + delattr(mocked_item, "callspec") expect(rp_service._get_parameters(mocked_item) is None) assert_expectations() diff --git a/tox.ini b/tox.ini index ef7de81..43fd5f3 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,7 @@ isolated_build = True envlist = pep - py37 + nobdd py38 py39 py310 @@ -14,23 +14,33 @@ envlist = deps = -rrequirements.txt -rrequirements-dev.txt + -rrequirements-dev-bdd.txt setenv = AGENT_NO_ANALYTICS = 1 commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xml tests/ -s -vv +[testenv:nobdd] +deps = + -rrequirements.txt + -rrequirements-dev.txt + +setenv = + AGENT_NO_ANALYTICS = 1 + +commands = pytest tests/ -s -vv --ignore tests/integration/test_bdd.py + [testenv:pep] skip_install = True -deps = pre-commit>=1.11.0 +deps = pre-commit>=1.19.0 commands = pre-commit run --all-files --show-diff-on-failure [gh-actions] python = - 3.7: py37 3.8: py38 3.9: py39 - 3.10: pep, py310 + 3.10: pep, nobdd, py310 3.11: py311 3.12: py312 3.13: py313
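For context on the `tests/unit/test_service.py` hunk earlier in this diff: the `delayed_assert` helpers record several soft checks and fail the test once at the end, listing every check that did not hold. A minimal sketch of that pattern, independent of the plugin's fixtures (the dictionary below is a made-up example, not the agent's real parameter handling):

```python
# Illustrative soft-assertion sketch only -- not the agent's real test.
from delayed_assert import assert_expectations, expect


def test_parameter_lookup_soft_asserts():
    params = {"param": "param_value"}
    expect(params.get("param") == "param_value")  # recorded, does not raise yet
    expect(params.get("missing") is None)         # recorded as well
    assert_expectations()                         # raises here if any expect() above failed
```

This mirrors the `expect(...)` / `assert_expectations()` calls used throughout the unit tests in this patch, where multiple expectations on the mocked config are evaluated before a single final assertion.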