From cf6972125ac9044be22ab9059eff941c721395d4 Mon Sep 17 00:00:00 2001
From: "reportportal.io"
Date: Wed, 4 Dec 2024 09:43:32 +0000
Subject: [PATCH 001/110] Changelog update

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5125920..6a6ed4e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,8 @@
 # Changelog

 ## [Unreleased]
+
+## [5.4.7]
 ### Added
 - Escaping of binary symbol '\0' in parameters, by @HardNorth

From 03cc8f1336b3731608a8c46822b8024cd6421e05 Mon Sep 17 00:00:00 2001
From: "reportportal.io"
Date: Wed, 4 Dec 2024 09:43:33 +0000
Subject: [PATCH 002/110] Version update

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index b5dc3ab..a705ba0 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@

 from setuptools import setup

-__version__ = '5.4.7'
+__version__ = '5.4.8'


 def read_file(fname):

From c53e48d632aea559ccecfe62f1fb7482972f8f0c Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Wed, 4 Dec 2024 12:46:12 +0300
Subject: [PATCH 003/110] CHANGELOG.md update

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a6ed4e..b7e13ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,7 @@

 ## [5.4.7]
 ### Added
-- Escaping of binary symbol '\0' in parameters, by @HardNorth
+- Issue [#382](https://github.com/reportportal/agent-python-pytest/issues/382): Escaping of binary symbol '\0' in parameters, by @HardNorth

 ## [5.4.6]
 ### Added

From 65ab50f7d1310624a186181b21fb305d5b6f8991 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Thu, 5 Dec 2024 11:44:35 +0300
Subject: [PATCH 004/110] README.rst converted to README.md

---
 .github/workflows/release.yml |   2 +-
 README.md                     | 122 +++++++++++++++++++++++++++++
 README.rst                    | 141 ----------------------------------
 setup.py                      |   4 +-
 4 files changed, 125 insertions(+), 144 deletions(-)
 create mode 100644 README.md
 delete mode 100644 README.rst

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 6acb220..4568ac6 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -19,7 +19,7 @@ on:
     paths-ignore:
       - '.github/**'
       - CHANGELOG.md
-      - README.rst
+      - README.md
       - CONTRIBUTING.rst

 env:

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d4d7928
--- /dev/null
+++ b/README.md
@@ -0,0 +1,122 @@
# ReportPortal integration for pytest framework

Pytest plugin for reporting Pytest test results to ReportPortal.

> **DISCLAIMER**: We use Google Analytics to send anonymous usage information, such as the agent's and client's
> names and versions, after a successful launch start. This information might help us improve both the ReportPortal
> backend and client sides. It is used by the ReportPortal team only and is not shared with 3rd parties.

[![PyPI](https://img.shields.io/pypi/v/pytest-reportportal.svg?maxAge=259200)](https://pypi.python.org/pypi/pytest-reportportal)
[![Python versions](https://img.shields.io/pypi/pyversions/pytest-reportportal.svg)](https://pypi.org/project/pytest-reportportal)
[![Tests](https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml/badge.svg)](https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml)
[![codecov](https://codecov.io/gh/reportportal/agent-python-pytest/graph/badge.svg?token=x5ZHqZKJFV)](https://codecov.io/gh/reportportal/agent-python-pytest)
[![Join Slack chat!](https://img.shields.io/badge/slack-join-brightgreen.svg)](https://slack.epmrpp.reportportal.io/)
[![stackoverflow](https://img.shields.io/badge/reportportal-stackoverflow-orange.svg?style=flat)](http://stackoverflow.com/questions/tagged/reportportal)
[![Build with Love](https://img.shields.io/badge/build%20with-❤%EF%B8%8F%E2%80%8D-lightgrey.svg)](http://reportportal.io?style=flat)

## Installation

To install the pytest plugin, execute the following command in a terminal:

```bash
pip install pytest-reportportal
```

Look through `CONTRIBUTING.rst` for contribution guidelines.

## Configuration

Prepare the config file `pytest.ini` in the root directory of your tests, or specify any other config file using the pytest command line option:

```bash
py.test -c config.cfg
```

The `pytest.ini` file should contain the following mandatory fields:

- `rp_api_key` - the value can be found in the User Profile section
- `rp_project` - the name of the project in ReportPortal
- `rp_endpoint` - the address of the ReportPortal server

Example of `pytest.ini`:

```text
[pytest]
rp_api_key = fb586627-32be-47dd-93c1-678873458a5f
rp_endpoint = http://192.168.1.10:8080
rp_project = user_personal
rp_launch = AnyLaunchName
rp_launch_attributes = 'PyTest' 'Smoke'
rp_launch_description = 'Smoke test'
rp_ignore_attributes = 'xfail' 'usefixture'
```

- The `rp_api_key` can also be set with the environment variable `RP_API_KEY`. This will override the value set for `rp_api_key` in `pytest.ini`.

There are also optional parameters:
https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/
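The `RP_API_KEY` environment variable mentioned above takes precedence because the plugin checks the process environment before falling back to the INI value. A minimal sketch of that resolution order (illustrative only, not the plugin's exact implementation):

```python
import os


def resolve_api_key(ini_value=None):
    """Illustrative precedence: `RP_API_KEY` from the environment wins over `pytest.ini`."""
    return os.getenv("RP_API_KEY") or ini_value
```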
## Examples

To log the test item flow to ReportPortal, please use the Python logging handler provided by the plugin, as shown
below:

in `conftest.py`:

```python
import logging

import pytest

from reportportal_client import RPLogger


@pytest.fixture(scope="session")
def rp_logger():
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logging.setLoggerClass(RPLogger)
    return logger
```

in tests:

```python
# In this case only INFO messages will be sent to ReportPortal.
def test_one(rp_logger):
    rp_logger.info("Case1. Step1")
    x = "this"
    rp_logger.info("x is: %s", x)
    assert 'h' in x

    # Message with an attachment.
    import subprocess
    free_memory = subprocess.check_output("free -h".split())
    rp_logger.info(
        "Case1. Memory consumption",
        attachment={
            "name": "free_memory.txt",
            "data": free_memory,
            "mime": "application/octet-stream",
        },
    )

    # This debug message will not be sent to ReportPortal.
    rp_logger.debug("Case1. 
Debug message") +``` + +## Launching + +To run test with ReportPortal you must provide `--reportportal` flag: + +```bash +py.test ./tests --reportportal +``` + +Check the documentation to find more detailed information about how to integrate pytest with ReportPortal using the +agent: +https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/ + +## Copyright Notice + +Licensed under the [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) license (see the LICENSE file). diff --git a/README.rst b/README.rst deleted file mode 100644 index 4402c3c..0000000 --- a/README.rst +++ /dev/null @@ -1,141 +0,0 @@ -=================== -agent-python-pytest -=================== - -.. image:: https://img.shields.io/pypi/v/pytest-reportportal.svg - :target: https://pypi.python.org/pypi/pytest-reportportal - :alt: Latest Version -.. image:: https://img.shields.io/pypi/pyversions/pytest-reportportal.svg - :target: https://pypi.org/project/pytest-reportportal - :alt: Supported python versions -.. image:: https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml/badge.svg - :target: https://github.com/reportportal/agent-python-pytest/actions/workflows/tests.yml - :alt: Test status -.. image:: https://codecov.io/gh/reportportal/agent-python-pytest/branch/develop/graph/badge.svg - :target: https://codecov.io/gh/reportportal/agent-python-pytest - :alt: Test coverage -.. image:: https://img.shields.io/badge/slack-join-brightgreen.svg - :target: https://slack.epmrpp.reportportal.io/ - :alt: Join Slack chat! - - -Pytest plugin for reporting test results of the Pytest to the ReportPortal. - -Installation -~~~~~~~~~~~~ - -To install pytest plugin execute next command in a terminal: - -.. code-block:: bash - - pip install pytest-reportportal - - - -Look through the CONTRIBUTING.rst for contribution guidelines. - -Configuration -~~~~~~~~~~~~~ - -Prepare the config file :code:`pytest.ini` in root directory of tests or specify -any one using pytest command line option: - -.. code-block:: bash - - py.test -c config.cfg - - -The :code:`pytest.ini` file should have next mandatory fields: - -- :code:`rp_api_key` - value could be found in the User Profile section -- :code:`rp_project` - name of project in ReportPortal -- :code:`rp_endpoint` - address of ReportPortal Server - -Example of :code:`pytest.ini`: - -.. code-block:: text - - [pytest] - rp_api_key = fb586627-32be-47dd-93c1-678873458a5f - rp_endpoint = http://192.168.1.10:8080 - rp_project = user_personal - rp_launch = AnyLaunchName - rp_launch_attributes = 'PyTest' 'Smoke' - rp_launch_description = 'Smoke test' - rp_ignore_attributes = 'xfail' 'usefixture' - -- The :code:`rp_api_key` can also be set with the environment variable `RP_API_KEY`. This will override the value set for :code:`rp_api_key` in pytest.ini - -There are also optional parameters: -https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/ - -Examples -~~~~~~~~ - -For logging of the test item flow to ReportPortal, please, use the python -logging handler provided by plugin like bellow: - -in conftest.py: - -.. code-block:: python - - import logging - import sys - - import pytest - - from reportportal_client import RPLogger - - - @pytest.fixture(scope="session") - def rp_logger(): - logger = logging.getLogger(__name__) - logger.setLevel(logging.DEBUG) - logging.setLoggerClass(RPLogger) - return logger - -in tests: - -.. 
code-block:: python - - # In this case only INFO messages will be sent to the ReportPortal. - def test_one(rp_logger): - rp_logger.info("Case1. Step1") - x = "this" - rp_logger.info("x is: %s", x) - assert 'h' in x - - # Message with an attachment. - import subprocess - free_memory = subprocess.check_output("free -h".split()) - rp_logger.info( - "Case1. Memory consumption", - attachment={ - "name": "free_memory.txt", - "data": free_memory, - "mime": "application/octet-stream", - }, - ) - - # This debug message will not be sent to the ReportPortal. - rp_logger.debug("Case1. Debug message") - -Launching -~~~~~~~~~ - -To run test with ReportPortal you must provide '--reportportal' flag: - -.. code-block:: bash - - py.test ./tests --reportportal - -Check the documentation to find more detailed information about how to integrate pytest with ReportPortal using an agent: -https://reportportal.io/docs/log-data-in-reportportal/test-framework-integration/Python/pytest/ - -Copyright Notice ----------------- -.. Copyright Notice: https://github.com/reportportal/agent-python-pytest#copyright-notice - -Licensed under the `Apache 2.0`_ license (see the LICENSE file). - -.. _Apache 2.0: https://www.apache.org/licenses/LICENSE-2.0 diff --git a/setup.py b/setup.py index a705ba0..47efdc8 100644 --- a/setup.py +++ b/setup.py @@ -35,8 +35,8 @@ def read_file(fname): name='pytest-reportportal', version=__version__, description='Agent for Reporting results of tests to the Report Portal', - long_description=read_file('README.rst'), - long_description_content_type='text/x-rst', + long_description=read_file('README.md'), + long_description_content_type='text/markdown', author='Report Portal Team', author_email='support@reportportal.io', url='https://github.com/reportportal/agent-python-pytest', From c1c719385711c7256a973fc810f9afdaf1032bc6 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 5 Dec 2024 11:45:14 +0300 Subject: [PATCH 005/110] Set Python 3.10 as release version of Python --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4568ac6..6be3bf2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,7 +41,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.10' - name: Install dependencies run: python -m pip install --upgrade pip setuptools wheel From c756aeb258f33fedea939e0d5f767320900753f3 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 5 Feb 2025 17:19:32 +0300 Subject: [PATCH 006/110] Some type fixes --- pytest_reportportal/plugin.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index 4f6d462..ebb530b 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -17,7 +17,7 @@ import os.path import time from logging import Logger -from typing import Any +from typing import Any, Generator, Optional import _pytest.logging import dill as pickle @@ -234,7 +234,7 @@ def pytest_configure(config) -> None: # noinspection PyProtectedMember @pytest.hookimpl(hookwrapper=True) -def pytest_runtestloop(session: Session) -> None: +def pytest_runtestloop(session: Session) -> Generator[None, Any, None]: """ Control start and finish of all test items in the session. 
@@ -253,7 +253,7 @@

 # noinspection PyProtectedMember
 @pytest.hookimpl(hookwrapper=True)
-def pytest_runtest_protocol(item: Item) -> None:
+def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]:
     """Control start and finish of pytest items.

     :param item: Pytest.Item
@@ -282,23 +282,21 @@

 # noinspection PyProtectedMember
 @pytest.hookimpl(hookwrapper=True)
-def pytest_runtest_makereport(item: Item) -> None:
+def pytest_runtest_makereport(item: Item) -> Generator[None, Any, None]:
     """Change runtest_makereport function.

     :param item: pytest.Item
     :return: None
     """
-    config = item.config
-    if not config._rp_enabled:
-        yield
+    result = yield
+    if not item.config._rp_enabled:
         return
-
-    report = (yield).get_result()
+    report = result.get_result()
     service = item.config.py_test_service
     service.process_results(item, report)


-def report_fixture(request, fixturedef, name: str, error_msg: str) -> None:
+def report_fixture(request, fixturedef, name: str, error_msg: str) -> Generator[Any | None, Any | None, None]:
     """Report fixture setup and teardown.

     :param request: Object of the FixtureRequest class
@@ -329,7 +327,7 @@

 # no types for backward compatibility for older pytest versions
 @pytest.hookimpl(hookwrapper=True)
-def pytest_fixture_setup(fixturedef, request) -> None:
+def pytest_fixture_setup(fixturedef, request) -> Generator[Optional[Any], Optional[Any], None]:
     """Report fixture setup.

     :param fixturedef: represents definition of the fixture class
@@ -342,7 +340,7 @@

 # no types for backward compatibility for older pytest versions
 @pytest.hookimpl(hookwrapper=True)
-def pytest_fixture_post_finalizer(fixturedef, request) -> None:
+def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[Optional[Any], Optional[Any], None]:
     """Report fixture teardown.

     :param fixturedef: represents definition of the fixture class
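The `Generator[...]` return annotations above match how pluggy drives old-style `hookwrapper=True` hooks: the hook function is a generator that yields exactly once and receives the call outcome at the `yield` point. A minimal self-contained sketch of that protocol (the `user_properties` usage is illustrative, not part of this plugin):

```python
import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item):
    # pluggy sends the hook call outcome into the single `yield` expression.
    outcome = yield
    # `get_result()` returns the hook's result, or re-raises its exception.
    report = outcome.get_result()
    if report.when == "call" and report.failed:
        # Illustrative post-processing; `user_properties` is a standard pytest list.
        item.user_properties.append(("rp_failed", True))
```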
From 71fdd7c5e86759462a6af2317145812b784a0d12 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Wed, 5 Feb 2025 17:37:06 +0300
Subject: [PATCH 007/110] Add pytest_bdd check

---
 pytest_reportportal/plugin.py | 7 +++++++
 requirements-dev.txt          | 1 +
 2 files changed, 8 insertions(+)

diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py
index ebb530b..42aef7b 100644
--- a/pytest_reportportal/plugin.py
+++ b/pytest_reportportal/plugin.py
@@ -34,6 +34,13 @@
 from pytest_reportportal.rp_logging import patching_logger_class, patching_thread_class
 from pytest_reportportal.service import PyTestServiceClass

+try:
+    # noinspection PyPackageRequirements
+    from pytest_bdd import given
+    PYTEST_BDD = True
+except ImportError:
+    PYTEST_BDD = False
+
 log: Logger = logging.getLogger(__name__)

 MANDATORY_PARAMETER_MISSED_PATTERN: str = \

diff --git a/requirements-dev.txt b/requirements-dev.txt
index 8cddeb9..14c6d5e 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,3 +1,4 @@
 delayed-assert
 pytest-cov
 pytest-parallel
+pytest-bdd>=3.1.0
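The `try`/`except ImportError` probe above is a standard soft-dependency check: the optional package is imported once at module load time, and a module-level flag records whether it is available. A hypothetical consumer of that flag (the `__scenario__` attribute check below assumes pytest-bdd's decorated scenario functions and is illustrative only):

```python
def is_pytest_bdd_scenario(item) -> bool:
    """Hypothetical helper: detect pytest-bdd scenario items only when the package is installed."""
    if not PYTEST_BDD:  # module-level flag set by the import probe above
        return False
    obj = getattr(item, "obj", None)
    return hasattr(obj, "__scenario__")
```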
From 18c624b9b793ddabbf057b7abb43068033b7a2b0 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Fri, 7 Feb 2025 13:47:45 +0300
Subject: [PATCH 008/110] Rollback ubuntu

---
 .github/workflows/tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 234988f..362e356 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -17,7 +17,7 @@ on: [ push, pull_request ]

 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         python-version: [ '3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ]

From 86482803fe5138ced93c6af770005b63a3f31fe2 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Fri, 7 Feb 2025 13:59:42 +0300
Subject: [PATCH 009/110] Fix typing

---
 pytest_reportportal/plugin.py  | 8 ++++----
 pytest_reportportal/service.py | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py
index 42aef7b..d051597 100644
--- a/pytest_reportportal/plugin.py
+++ b/pytest_reportportal/plugin.py
@@ -17,7 +17,7 @@
 import os.path
 import time
 from logging import Logger
-from typing import Any, Generator, Optional
+from typing import Any, Generator

 import _pytest.logging
 import dill as pickle
@@ -303,7 +303,7 @@
     service.process_results(item, report)


-def report_fixture(request, fixturedef, name: str, error_msg: str) -> Generator[Any | None, Any | None, None]:
+def report_fixture(request, fixturedef, name: str, error_msg: str) -> Generator[None, Any, None]:
     """Report fixture setup and teardown.

     :param request: Object of the FixtureRequest class
@@ -334,7 +334,7 @@

 # no types for backward compatibility for older pytest versions
 @pytest.hookimpl(hookwrapper=True)
-def pytest_fixture_setup(fixturedef, request) -> Generator[Optional[Any], Optional[Any], None]:
+def pytest_fixture_setup(fixturedef, request) -> Generator[None, Any, None]:
     """Report fixture setup.

     :param fixturedef: represents definition of the fixture class
@@ -347,7 +347,7 @@

 # no types for backward compatibility for older pytest versions
 @pytest.hookimpl(hookwrapper=True)
-def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[Optional[Any], Optional[Any], None]:
+def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[None, Any, None]:
     """Report fixture teardown.

     :param fixturedef: represents definition of the fixture class

diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py
index 31a593f..6a13d86 100644
--- a/pytest_reportportal/service.py
+++ b/pytest_reportportal/service.py
@@ -20,7 +20,7 @@
 from functools import wraps
 from os import curdir
 from time import time, sleep
-from typing import List, Any, Optional, Set, Dict, Tuple, Union, Callable
+from typing import List, Any, Optional, Set, Dict, Tuple, Union, Callable, Generator

 from _pytest.doctest import DoctestItem
 from aenum import auto, Enum, unique
@@ -892,7 +892,7 @@ def post_log(self, test_item, message: str, log_level: str = 'INFO', attachment:
         sl_rq = self._build_log(item_id, message, log_level, attachment)
         self.rp.log(**sl_rq)

-    def report_fixture(self, name: str, error_msg: str) -> None:
+    def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None]:
         """Report fixture setup and teardown.

         :param name: Name of the fixture

From a0d080a159196374ba9fb9e4e4124688d165a918 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Fri, 7 Feb 2025 14:05:25 +0300
Subject: [PATCH 010/110] Revert "Rollback ubuntu"

This reverts commit 18c624b9b793ddabbf057b7abb43068033b7a2b0.
---
 .github/workflows/tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 362e356..234988f 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -17,7 +17,7 @@ on: [ push, pull_request ]

 jobs:
   build:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     strategy:
       matrix:
         python-version: [ '3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ]

From 42526f91580181620cd6fcbb7a5cea3ed74577f0 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Fri, 7 Feb 2025 14:06:31 +0300
Subject: [PATCH 011/110] Update pytest-bdd

---
 requirements-dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-dev.txt b/requirements-dev.txt
index 14c6d5e..0209e42 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,4 +1,4 @@
 delayed-assert
 pytest-cov
 pytest-parallel
-pytest-bdd>=3.1.0
+pytest-bdd>=7.0.1

From 77bb9cf2322d11e591ec9b1c450f3746c24d7765 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Fri, 7 Feb 2025 14:11:10 +0300
Subject: [PATCH 012/110] Remove Python 3.7 support

---
 .github/workflows/tests.yml | 2 +-
 setup.py                    | 3 +--
 tox.ini                     | 2 --
 3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 234988f..779affa 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -20,7 +20,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [ '3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ]
+        python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ]
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4

diff --git a/setup.py b/setup.py
index 47efdc8..3f33715 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@

 from setuptools import setup

-__version__ = 
'5.4.8' +__version__ = '5.5.0' def read_file(fname): @@ -47,7 +47,6 @@ def read_file(fname): keywords=['testing', 'reporting', 'reportportal', 'pytest', 'agent'], classifiers=[ 'Framework :: Pytest', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', diff --git a/tox.ini b/tox.ini index ef7de81..60670a9 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,6 @@ isolated_build = True envlist = pep - py37 py38 py39 py310 @@ -27,7 +26,6 @@ commands = pre-commit run --all-files --show-diff-on-failure [gh-actions] python = - 3.7: py37 3.8: py38 3.9: py39 3.10: pep, py310 From 992001a61c6002f5735b7047783cd9926ba1f219 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 14:12:43 +0300 Subject: [PATCH 013/110] CHANGELOG.md update --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b7e13ec..b2b6ac3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ # Changelog ## [Unreleased] +### Removed +- `Python 3.7` support, by @HardNorth ## [5.4.7] ### Added From 26ba96febaf1ab3b712ab3600e8fa21dba34d421 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 14:15:05 +0300 Subject: [PATCH 014/110] Disable flake8 for specific line --- pytest_reportportal/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index d051597..d28c820 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -36,7 +36,7 @@ try: # noinspection PyPackageRequirements - from pytest_bdd import given + from pytest_bdd import given # noqa: F401 PYTEST_BDD = True except ImportError: PYTEST_BDD = False From 935dc3336e9331fa16f2ac9df4ab6f93912fa403 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 14:53:58 +0300 Subject: [PATCH 015/110] Apply new code style --- .pre-commit-config.yaml | 42 +- examples/attributes/test_runtime_attribute.py | 4 +- examples/custom_name/test_custom_name_args.py | 2 +- .../custom_name/test_custom_name_empty.py | 2 +- .../custom_name/test_custom_name_kwargs.py | 2 +- .../fixtures/class_fixture_return/conftest.py | 4 +- .../test_fixture_class_setup.py | 1 + .../module_fixture_return/conftest.py | 4 +- .../test_fixture_module_setup.py | 1 + .../package_fixture_return/conftest.py | 4 +- .../test_fixture_package_setup_first.py | 1 + .../test_fixture_package_setup_second.py | 1 + .../session_fixture_return/conftest.py | 4 +- .../test_failure_fixture_teardown/conftest.py | 4 +- .../test_failure_fixture_teardown.py | 1 + .../test_fixture_return_none/conftest.py | 2 +- .../fixtures/test_fixture_setup/conftest.py | 2 +- .../test_fixture_setup_failure/conftest.py | 4 +- .../test_fixture_setup_failure.py | 2 +- .../test_fixture_teardown/conftest.py | 4 +- .../test_fixture_teardown_failure/conftest.py | 6 +- .../test_fixture_yield_none/conftest.py | 2 +- .../test_binary_symbol_in_parameters.py | 10 +- .../params/test_different_parameter_types.py | 5 +- .../params/test_in_class_parameterized.py | 3 +- examples/skip/test_simple_skip.py | 3 +- examples/skip/test_skip_issue.py | 9 +- .../test_case_id/test_case_id_decorator.py | 1 + .../test_case_id_decorator_no_id.py | 3 +- ...st_case_id_decorator_no_id_params_false.py | 3 +- ...est_case_id_decorator_no_id_params_true.py | 3 +- ..._case_id_decorator_no_id_partial_params.py | 5 +- ..._id_decorator_no_id_partial_params_true.py | 5 +- 
.../test_case_id_decorator_params_false.py | 3 +- .../test_case_id_decorator_params_no.py | 3 +- ...test_case_id_decorator_params_partially.py | 5 +- .../test_case_id_decorator_params_true.py | 3 +- examples/test_issue_id.py | 6 +- examples/test_issue_id_pass.py | 6 +- examples/test_max_item_name.py | 1 + examples/test_simple.py | 1 + examples/test_simple_fail.py | 1 + pyproject.toml | 8 + pytest_reportportal/__init__.py | 2 +- pytest_reportportal/config.py | 129 +++-- pytest_reportportal/plugin.py | 361 +++++++------- pytest_reportportal/rp_logging.py | 49 +- pytest_reportportal/service.py | 440 +++++++++--------- setup.py | 49 +- tests/__init__.py | 4 +- tests/helpers/utils.py | 110 ++--- tests/integration/__init__.py | 340 +++++++++----- tests/integration/test_attributes.py | 60 +-- tests/integration/test_case_id_report.py | 61 ++- tests/integration/test_code_reference.py | 30 +- tests/integration/test_config_handling.py | 192 ++++---- tests/integration/test_connection_close.py | 4 +- tests/integration/test_custom_name.py | 19 +- tests/integration/test_debug_mode.py | 16 +- tests/integration/test_empty_run.py | 13 +- tests/integration/test_fixtures.py | 235 +++++----- tests/integration/test_issue_report.py | 95 ++-- tests/integration/test_max_name_length.py | 6 +- tests/integration/test_parameters_report.py | 26 +- tests/integration/test_pass_failed_skipped.py | 36 +- tests/integration/test_pytest_parallel.py | 17 +- tests/integration/test_suite_hierarchy.py | 21 +- tests/integration/test_threads_logs.py | 9 +- tests/unit/conftest.py | 55 +-- tests/unit/test_config.py | 20 +- tests/unit/test_plugin.py | 241 +++++----- tests/unit/test_service.py | 8 +- 72 files changed, 1443 insertions(+), 1391 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2c83d96..071dfb1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,27 +1,31 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files -- repo: https://github.com/PyCQA/pydocstyle - rev: 6.0.0 + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - repo: https://github.com/PyCQA/pydocstyle + rev: 6.3.0 hooks: - - id: pydocstyle + - id: pydocstyle exclude: | - (?x)^( - tests/.* | - examples/.* - ) -- repo: https://github.com/Lucas-C/pre-commit-hooks-markup - rev: v1.0.1 + (?x)^( + tests/.* + ) + - repo: https://github.com/psf/black + rev: 24.10.0 hooks: - - id: rst-linter -- repo: https://github.com/pycqa/flake8 - rev: 5.0.4 + - id: black + args: [ '--check', 'robotframework_reportportal', 'tests' ] + - repo: https://github.com/pycqa/isort + rev: 5.13.2 hooks: - - id: flake8 + - id: isort + - repo: https://github.com/pycqa/flake8 + rev: 7.1.1 + hooks: + - id: flake8 diff --git a/examples/attributes/test_runtime_attribute.py b/examples/attributes/test_runtime_attribute.py index 45ecd22..55fad91 100644 --- a/examples/attributes/test_runtime_attribute.py +++ b/examples/attributes/test_runtime_attribute.py @@ -20,7 +20,5 @@ def test_custom_attributes_report(request): This is a test with one custom marker as a decorator and one custom marker added at runtime which shall both appear on ReportPortal on test's item """ - request.node.add_marker( - 
pytest.mark.runtime() - ) + request.node.add_marker(pytest.mark.runtime()) assert True diff --git a/examples/custom_name/test_custom_name_args.py b/examples/custom_name/test_custom_name_args.py index cf2386f..a20c3c6 100644 --- a/examples/custom_name/test_custom_name_args.py +++ b/examples/custom_name/test_custom_name_args.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -TEST_NAME_ARGS = 'Test name by mark' +TEST_NAME_ARGS = "Test name by mark" @pytest.mark.name(TEST_NAME_ARGS) diff --git a/examples/custom_name/test_custom_name_empty.py b/examples/custom_name/test_custom_name_empty.py index 50373fe..57ac526 100644 --- a/examples/custom_name/test_custom_name_empty.py +++ b/examples/custom_name/test_custom_name_empty.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -TEST_NAME_EMPTY = 'examples/custom_name/test_custom_name_empty.py::test_name_by_mark_empty' +TEST_NAME_EMPTY = "examples/custom_name/test_custom_name_empty.py::test_name_by_mark_empty" @pytest.mark.name() diff --git a/examples/custom_name/test_custom_name_kwargs.py b/examples/custom_name/test_custom_name_kwargs.py index c4bce53..997fdb9 100644 --- a/examples/custom_name/test_custom_name_kwargs.py +++ b/examples/custom_name/test_custom_name_kwargs.py @@ -13,7 +13,7 @@ # limitations under the License. import pytest -TEST_NAME_KWARGS = 'Test name by mark, kwargs' +TEST_NAME_KWARGS = "Test name by mark, kwargs" @pytest.mark.name(name=TEST_NAME_KWARGS) diff --git a/examples/fixtures/class_fixture_return/conftest.py b/examples/fixtures/class_fixture_return/conftest.py index da43a33..33e6d2b 100644 --- a/examples/fixtures/class_fixture_return/conftest.py +++ b/examples/fixtures/class_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def class_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/class_fixture_return/test_fixture_class_setup.py b/examples/fixtures/class_fixture_return/test_fixture_class_setup.py index 78881af..5900957 100644 --- a/examples/fixtures/class_fixture_return/test_fixture_class_setup.py +++ b/examples/fixtures/class_fixture_return/test_fixture_class_setup.py @@ -24,6 +24,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + class TestClassOne: def test_fixture_class_setup_first(self, class_fixture_return_config): assert class_fixture_return_config is not None diff --git a/examples/fixtures/module_fixture_return/conftest.py b/examples/fixtures/module_fixture_return/conftest.py index 3336e6a..8d72be3 100644 --- a/examples/fixtures/module_fixture_return/conftest.py +++ b/examples/fixtures/module_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def module_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/module_fixture_return/test_fixture_module_setup.py b/examples/fixtures/module_fixture_return/test_fixture_module_setup.py index b11fe6d..cfac0db 100644 --- a/examples/fixtures/module_fixture_return/test_fixture_module_setup.py +++ b/examples/fixtures/module_fixture_return/test_fixture_module_setup.py @@ -24,6 +24,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ def test_fixture_module_setup_first(module_fixture_return_config): assert module_fixture_return_config is not None diff --git a/examples/fixtures/package_fixture_return/conftest.py b/examples/fixtures/package_fixture_return/conftest.py index f406944..a9bb436 100644 --- a/examples/fixtures/package_fixture_return/conftest.py +++ b/examples/fixtures/package_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='package') +@pytest.fixture(scope="package") def package_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py b/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py index 4c42833..1037354 100644 --- a/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py +++ b/examples/fixtures/package_fixture_return/test_fixture_package_setup_first.py @@ -24,5 +24,6 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_fixture_package_setup_first(package_fixture_return_config): assert package_fixture_return_config is not None diff --git a/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py b/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py index 7ba2f2a..f66e738 100644 --- a/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py +++ b/examples/fixtures/package_fixture_return/test_fixture_package_setup_second.py @@ -24,5 +24,6 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_fixture_package_setup_second(package_fixture_return_config): assert package_fixture_return_config is not None diff --git a/examples/fixtures/session_fixture_return/conftest.py b/examples/fixtures/session_fixture_return/conftest.py index 9ab2017..fd08838 100644 --- a/examples/fixtures/session_fixture_return/conftest.py +++ b/examples/fixtures/session_fixture_return/conftest.py @@ -17,7 +17,7 @@ import pytest -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def session_fixture_return_config(): - print('setup') + print("setup") return mock.Mock() diff --git a/examples/fixtures/test_failure_fixture_teardown/conftest.py b/examples/fixtures/test_failure_fixture_teardown/conftest.py index 3100fac..e3df987 100644 --- a/examples/fixtures/test_failure_fixture_teardown/conftest.py +++ b/examples/fixtures/test_failure_fixture_teardown/conftest.py @@ -22,8 +22,8 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_BEFORE_YIELD = 'Log message before yield and test failure' -LOG_MESSAGE_TEARDOWN = 'Log message for teardown after test failure' +LOG_MESSAGE_BEFORE_YIELD = "Log message before yield and test failure" +LOG_MESSAGE_TEARDOWN = "Log message for teardown after test failure" @pytest.fixture diff --git a/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py b/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py index 31f6884..390feaa 100644 --- a/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py +++ b/examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py @@ -24,5 +24,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ def test_failure_fixture_teardown(test_failure_fixture_teardown_config): assert test_failure_fixture_teardown_config is None diff --git a/examples/fixtures/test_fixture_return_none/conftest.py b/examples/fixtures/test_fixture_return_none/conftest.py index a81a879..a320912 100644 --- a/examples/fixtures/test_fixture_return_none/conftest.py +++ b/examples/fixtures/test_fixture_return_none/conftest.py @@ -21,7 +21,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup and return None' +LOG_MESSAGE_SETUP = "Log message for setup and return None" @pytest.fixture diff --git a/examples/fixtures/test_fixture_setup/conftest.py b/examples/fixtures/test_fixture_setup/conftest.py index 36897fa..7d74f8e 100644 --- a/examples/fixtures/test_fixture_setup/conftest.py +++ b/examples/fixtures/test_fixture_setup/conftest.py @@ -22,7 +22,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup' +LOG_MESSAGE_SETUP = "Log message for setup" @pytest.fixture diff --git a/examples/fixtures/test_fixture_setup_failure/conftest.py b/examples/fixtures/test_fixture_setup_failure/conftest.py index a6dfce2..5995d54 100644 --- a/examples/fixtures/test_fixture_setup_failure/conftest.py +++ b/examples/fixtures/test_fixture_setup_failure/conftest.py @@ -21,10 +21,10 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup failure' +LOG_MESSAGE_SETUP = "Log message for setup failure" @pytest.fixture def test_fixture_setup_failure_config(): logging.error(LOG_MESSAGE_SETUP) - raise Exception('Fixture setup failed') + raise Exception("Fixture setup failed") diff --git a/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py b/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py index 76c53b0..db4fe95 100644 --- a/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py +++ b/examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py @@ -32,7 +32,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_TEST = 'Log message for test of setup failure' +LOG_MESSAGE_TEST = "Log message for test of setup failure" def test_fixture_setup_failure(test_fixture_setup_failure_config): diff --git a/examples/fixtures/test_fixture_teardown/conftest.py b/examples/fixtures/test_fixture_teardown/conftest.py index 29b3e70..10d3d6a 100644 --- a/examples/fixtures/test_fixture_teardown/conftest.py +++ b/examples/fixtures/test_fixture_teardown/conftest.py @@ -22,8 +22,8 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_BEFORE_YIELD = 'Log message before yield' -LOG_MESSAGE_TEARDOWN = 'Log message for teardown' +LOG_MESSAGE_BEFORE_YIELD = "Log message before yield" +LOG_MESSAGE_TEARDOWN = "Log message for teardown" @pytest.fixture diff --git a/examples/fixtures/test_fixture_teardown_failure/conftest.py b/examples/fixtures/test_fixture_teardown_failure/conftest.py index 3315a86..17c5e21 100644 --- a/examples/fixtures/test_fixture_teardown_failure/conftest.py +++ b/examples/fixtures/test_fixture_teardown_failure/conftest.py @@ -22,8 +22,8 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_BEFORE_YIELD = 'Log message before yield and failure' -LOG_MESSAGE_TEARDOWN = 'Log message for failure teardown' +LOG_MESSAGE_BEFORE_YIELD = "Log message before yield and failure" +LOG_MESSAGE_TEARDOWN = "Log message for failure teardown" 
@pytest.fixture @@ -31,4 +31,4 @@ def test_fixture_teardown_failure_config(): logging.error(LOG_MESSAGE_BEFORE_YIELD) yield mock.Mock() logging.error(LOG_MESSAGE_TEARDOWN) - raise Exception('Fixture teardown failed') + raise Exception("Fixture teardown failed") diff --git a/examples/fixtures/test_fixture_yield_none/conftest.py b/examples/fixtures/test_fixture_yield_none/conftest.py index 28839ec..e35b52a 100644 --- a/examples/fixtures/test_fixture_yield_none/conftest.py +++ b/examples/fixtures/test_fixture_yield_none/conftest.py @@ -21,7 +21,7 @@ LOGGER.setLevel(logging.DEBUG) logging.setLoggerClass(RPLogger) -LOG_MESSAGE_SETUP = 'Log message for setup and yield None' +LOG_MESSAGE_SETUP = "Log message for setup and yield None" @pytest.fixture diff --git a/examples/params/test_binary_symbol_in_parameters.py b/examples/params/test_binary_symbol_in_parameters.py index 6ae5cac..241a1f6 100644 --- a/examples/params/test_binary_symbol_in_parameters.py +++ b/examples/params/test_binary_symbol_in_parameters.py @@ -1,16 +1,14 @@ """A simple example test with different parameter types.""" -import pytest +import pytest -BINARY_TEXT = 'Some text with binary symbol \0' +BINARY_TEXT = "Some text with binary symbol \0" -@pytest.mark.parametrize( - ['text'], [[BINARY_TEXT]] -) +@pytest.mark.parametrize(["text"], [[BINARY_TEXT]]) def test_in_class_parameterized(text): """ This is my test with different parameter types. """ assert text == BINARY_TEXT - assert text != BINARY_TEXT.replace('\0', '\\0') + assert text != BINARY_TEXT.replace("\0", "\\0") diff --git a/examples/params/test_different_parameter_types.py b/examples/params/test_different_parameter_types.py index c85b9de..d219884 100644 --- a/examples/params/test_different_parameter_types.py +++ b/examples/params/test_different_parameter_types.py @@ -1,10 +1,9 @@ """A simple example test with different parameter types.""" + import pytest -@pytest.mark.parametrize( - ['integer', 'floating_point', 'boolean', 'none'], [(1, 1.5, True, None)] -) +@pytest.mark.parametrize(["integer", "floating_point", "boolean", "none"], [(1, 1.5, True, None)]) def test_in_class_parameterized(integer, floating_point, boolean, none): """ This is my test with different parameter types. diff --git a/examples/params/test_in_class_parameterized.py b/examples/params/test_in_class_parameterized.py index cc58c1d..8c3403c 100644 --- a/examples/params/test_in_class_parameterized.py +++ b/examples/params/test_in_class_parameterized.py @@ -1,10 +1,11 @@ """A simple example test in a class with a parameter.""" + import pytest class Tests: - @pytest.mark.parametrize('param', ['param']) + @pytest.mark.parametrize("param", ["param"]) def test_in_class_parameterized(self, param): """ This is my test inside `Tests` class with a parameter diff --git a/examples/skip/test_simple_skip.py b/examples/skip/test_simple_skip.py index 64afdb1..69ce838 100644 --- a/examples/skip/test_simple_skip.py +++ b/examples/skip/test_simple_skip.py @@ -1,4 +1,5 @@ """Simple example skipped test.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +15,6 @@ import pytest -@pytest.mark.skip(reason='no way of currently testing this') +@pytest.mark.skip(reason="no way of currently testing this") def test_simple_skip(): assert False diff --git a/examples/skip/test_skip_issue.py b/examples/skip/test_skip_issue.py index c510a87..3674616 100644 --- a/examples/skip/test_skip_issue.py +++ b/examples/skip/test_skip_issue.py @@ -1,4 +1,5 @@ """Simple example skipped test.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,12 +14,12 @@ # limitations under the License import pytest -ID = 'ABC-1234' -REASON = 'some_bug' -TYPE = 'PB' +ID = "ABC-1234" +REASON = "some_bug" +TYPE = "PB" @pytest.mark.issue(issue_id=ID, reason=REASON, issue_type=TYPE) -@pytest.mark.skip(reason='no way of currently testing this') +@pytest.mark.skip(reason="no way of currently testing this") def test_simple_skip(): assert False diff --git a/examples/test_case_id/test_case_id_decorator.py b/examples/test_case_id/test_case_id_decorator.py index c826c7c..9bbda94 100644 --- a/examples/test_case_id/test_case_id_decorator.py +++ b/examples/test_case_id/test_case_id_decorator.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/test_case_id/test_case_id_decorator_no_id.py b/examples/test_case_id/test_case_id_decorator_no_id.py index 580f97d..5d59a68 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id.py +++ b/examples/test_case_id/test_case_id_decorator_no_id.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_params_false.py b/examples/test_case_id/test_case_id_decorator_no_id_params_false.py index dda3126..005046a 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_params_false.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_params_false.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(parameterized=False) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_params_true.py b/examples/test_case_id/test_case_id_decorator_no_id_params_true.py index c0ada01..5ff41b2 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_params_true.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_params_true.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(parameterized=True) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py b/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py index 708fb7a..2a980bd 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_partial_params.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) -@pytest.mark.tc_id(params=['param2']) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) +@pytest.mark.tc_id(params=["param2"]) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py b/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py index a7953a7..2222af6 100644 --- a/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py +++ b/examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator no arguments.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +16,7 @@ import pytest -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) -@pytest.mark.tc_id(parameterized=True, params=['param2']) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) +@pytest.mark.tc_id(parameterized=True, params=["param2"]) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_false.py b/examples/test_case_id/test_case_id_decorator_params_false.py index 9c273af..0830783 100644 --- a/examples/test_case_id/test_case_id_decorator_params_false.py +++ b/examples/test_case_id/test_case_id_decorator_params_false.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-321" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(TEST_CASE_ID, parameterized=False) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_no.py b/examples/test_case_id/test_case_id_decorator_params_no.py index 0fa5306..00cf143 100644 --- a/examples/test_case_id/test_case_id_decorator_params_no.py +++ b/examples/test_case_id/test_case_id_decorator_params_no.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-132" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(TEST_CASE_ID) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_partially.py b/examples/test_case_id/test_case_id_decorator_params_partially.py index ec998a2..88995fa 100644 --- a/examples/test_case_id/test_case_id_decorator_params_partially.py +++ b/examples/test_case_id/test_case_id_decorator_params_partially.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-213" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) -@pytest.mark.tc_id(TEST_CASE_ID, parameterized=True, params=['param1']) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) +@pytest.mark.tc_id(TEST_CASE_ID, parameterized=True, params=["param1"]) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_case_id/test_case_id_decorator_params_true.py b/examples/test_case_id/test_case_id_decorator_params_true.py index d04283f..bb3db5a 100644 --- a/examples/test_case_id/test_case_id_decorator_params_true.py +++ b/examples/test_case_id/test_case_id_decorator_params_true.py @@ -1,4 +1,5 @@ """A simple example test with Test Case ID decorator and parameters.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,7 +18,7 @@ TEST_CASE_ID = "ISSUE-231" -@pytest.mark.parametrize(('param1', 'param2'), [('value1', 'value2')]) +@pytest.mark.parametrize(("param1", "param2"), [("value1", "value2")]) @pytest.mark.tc_id(TEST_CASE_ID, parameterized=True) def test_case_id_decorator(param1, param2): assert True diff --git a/examples/test_issue_id.py b/examples/test_issue_id.py index 46eadaa..38abcf1 100644 --- a/examples/test_issue_id.py +++ b/examples/test_issue_id.py @@ -13,9 +13,9 @@ import pytest -ID = 'ABC-1234' -REASON = 'some_bug' -TYPE = 'PB' +ID = "ABC-1234" +REASON = "some_bug" +TYPE = "PB" @pytest.mark.issue(issue_id=ID, reason=REASON, issue_type=TYPE) diff --git a/examples/test_issue_id_pass.py b/examples/test_issue_id_pass.py index 889deea..d4347d8 100644 --- a/examples/test_issue_id_pass.py +++ b/examples/test_issue_id_pass.py @@ -13,9 +13,9 @@ import pytest -ID = 'ABC-1234' -REASON = 'some_bug' -TYPE = 'PB' +ID = "ABC-1234" +REASON = "some_bug" +TYPE = "PB" @pytest.mark.issue(issue_id=ID, reason=REASON, issue_type=TYPE) diff --git a/examples/test_max_item_name.py b/examples/test_max_item_name.py index e96d684..4e929a2 100644 --- a/examples/test_max_item_name.py +++ b/examples/test_max_item_name.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_thi_is_simple_example_test_with_the_name_longer_than_maximum_allowed_lorem_ipsum_dolor_sit_amet_consectetur_adipiscing_elit_sed_do_eiusmod_tempor_incididunt_ut_labore_et_dolore_magna_aliqua_ut_enim_ad_minim_veniam_quis_nostrud_exercitation_ullamco_laboris_nisi_ut_aliquip_ex_ea_commodo_consequat_duis_aute_irure_dolor_in_reprehenderit_in_voluptate_velit_esse_cillum_dolore_eu_fugiat_nulla_pariatur_excepteur_sint_occaecat_cupidatat_non_proident_sunt_in_culpa_qui_officia_deserunt_mollit_anim_id_est_laborum_sed_ut_perspiciatis_unde_omnis_iste_natus_error_sit_voluptatem_accusantium_doloremque_laudantium_totam_rem_aperiam_eaque_ipsa_quae_ab_illo_inventore_veritatis_et_quasi_architecto_beatae_vitae_dicta_sunt_explicabo_nemo_enim_ipsam_voluptatem_quia_voluptas_sit_aspernatur_aut_odit_aut_fugit_sed_quia_consequuntur_magni_dolores_eos_qui_ratione_voluptatem_sequi_nesciunt_neque_porro_quisquam_est_qui_dolorem_ipsum_quia_dolor_sit_amet_consectetur_adipisci_velit_sed_quia_non_numquam_eius_modi_tempora_incidunt_ut_labore_et_dolore_magnam_aliquam_quaerat_voluptatem(): # noqa: E501 """Simple example test with the name longer than maximum allowed.""" assert True diff --git a/examples/test_simple.py b/examples/test_simple.py index c63f45d..34f2441 100644 --- a/examples/test_simple.py +++ b/examples/test_simple.py @@ -1,4 +1,5 @@ """Simple example test.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/test_simple_fail.py b/examples/test_simple_fail.py index 6fffad6..91094d5 100644 --- a/examples/test_simple_fail.py +++ b/examples/test_simple_fail.py @@ -1,4 +1,5 @@ """Simple example test which fails.""" + # Copyright (c) 2022 https://reportportal.io . # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/pyproject.toml b/pyproject.toml index ced8293..b9bf7b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,3 +6,11 @@ requires = [ "wheel==0.40.0", ] build-backend = "setuptools.build_meta" + +[tool.isort] +line_length = 119 +profile = "black" + +[tool.black] +line-length = 119 +target-version = ["py310"] diff --git a/pytest_reportportal/__init__.py b/pytest_reportportal/__init__.py index 36b85bb..83db8ba 100644 --- a/pytest_reportportal/__init__.py +++ b/pytest_reportportal/__init__.py @@ -13,6 +13,6 @@ """This package contains Pytest agent's code for the Report Portal.""" -__all__ = ['LAUNCH_WAIT_TIMEOUT'] +__all__ = ["LAUNCH_WAIT_TIMEOUT"] LAUNCH_WAIT_TIMEOUT = 10 diff --git a/pytest_reportportal/config.py b/pytest_reportportal/config.py index 0b20d34..26d6b75 100644 --- a/pytest_reportportal/config.py +++ b/pytest_reportportal/config.py @@ -15,10 +15,10 @@ import warnings from os import getenv -from typing import Optional, Union, Any, Tuple +from typing import Any, Optional, Tuple, Union from _pytest.config import Config -from reportportal_client import OutputType, ClientType +from reportportal_client import ClientType, OutputType from reportportal_client.helpers import to_bool from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE @@ -26,8 +26,7 @@ # This try/except can go away once we support pytest >= 5.4.0 from _pytest.logging import get_actual_log_level except ImportError: - from _pytest.logging import get_log_level_for_setting as \ - get_actual_log_level + from _pytest.logging import get_log_level_for_setting as get_actual_log_level class AgentConfig: @@ -72,107 +71,105 @@ class AgentConfig: def __init__(self, pytest_config: Config) -> None: """Initialize required attributes.""" - self.rp_rerun = (pytest_config.option.rp_rerun or pytest_config.getini('rp_rerun')) - self.rp_endpoint = self.find_option(pytest_config, 'rp_endpoint') - self.rp_hierarchy_code = to_bool(self.find_option(pytest_config, 'rp_hierarchy_code')) - self.rp_dir_level = int(self.find_option(pytest_config, 'rp_hierarchy_dirs_level')) - self.rp_hierarchy_dirs = to_bool(self.find_option(pytest_config, 'rp_hierarchy_dirs')) - self.rp_dir_path_separator = self.find_option(pytest_config, 'rp_hierarchy_dir_path_separator') - self.rp_hierarchy_test_file = to_bool(self.find_option(pytest_config, 'rp_hierarchy_test_file')) - self.rp_ignore_attributes = set(self.find_option(pytest_config, 'rp_ignore_attributes') or []) - self.rp_is_skipped_an_issue = self.find_option(pytest_config, 'rp_is_skipped_an_issue') - self.rp_issue_id_marks = self.find_option(pytest_config, 'rp_issue_id_marks') - self.rp_bts_issue_url = self.find_option(pytest_config, 'rp_bts_issue_url') + self.rp_rerun = pytest_config.option.rp_rerun or pytest_config.getini("rp_rerun") + self.rp_endpoint = self.find_option(pytest_config, "rp_endpoint") + self.rp_hierarchy_code = to_bool(self.find_option(pytest_config, "rp_hierarchy_code")) + self.rp_dir_level = int(self.find_option(pytest_config, "rp_hierarchy_dirs_level")) + self.rp_hierarchy_dirs = to_bool(self.find_option(pytest_config, "rp_hierarchy_dirs")) + self.rp_dir_path_separator = self.find_option(pytest_config, "rp_hierarchy_dir_path_separator") + self.rp_hierarchy_test_file = to_bool(self.find_option(pytest_config, "rp_hierarchy_test_file")) + self.rp_ignore_attributes = set(self.find_option(pytest_config, "rp_ignore_attributes") or []) + self.rp_is_skipped_an_issue = self.find_option(pytest_config, "rp_is_skipped_an_issue") + self.rp_issue_id_marks = 
self.find_option(pytest_config, "rp_issue_id_marks")
+        self.rp_bts_issue_url = self.find_option(pytest_config, "rp_bts_issue_url")
         if not self.rp_bts_issue_url:
-            self.rp_bts_issue_url = self.find_option(pytest_config, 'rp_issue_system_url')
+            self.rp_bts_issue_url = self.find_option(pytest_config, "rp_issue_system_url")
             if self.rp_bts_issue_url:
                 warnings.warn(
-                    'Parameter `rp_issue_system_url` is deprecated since 5.4.0 and will be subject for removing'
-                    'in the next major version. Use `rp_bts_issue_url` argument instead.',
+                    "Parameter `rp_issue_system_url` is deprecated since 5.4.0 and will be subject for removal "
+                    "in the next major version. Use `rp_bts_issue_url` argument instead.",
                     DeprecationWarning,
-                    2
+                    2,
                 )
-        self.rp_bts_project = self.find_option(pytest_config, 'rp_bts_project')
-        self.rp_bts_url = self.find_option(pytest_config, 'rp_bts_url')
-        self.rp_launch = self.find_option(pytest_config, 'rp_launch')
-        self.rp_launch_id = self.find_option(pytest_config, 'rp_launch_id')
-        self.rp_launch_attributes = self.find_option(pytest_config, 'rp_launch_attributes')
-        self.rp_launch_description = self.find_option(pytest_config, 'rp_launch_description')
-        self.rp_log_batch_size = int(self.find_option(pytest_config, 'rp_log_batch_size'))
-        batch_payload_size = self.find_option(pytest_config, 'rp_log_batch_payload_size')
+        self.rp_bts_project = self.find_option(pytest_config, "rp_bts_project")
+        self.rp_bts_url = self.find_option(pytest_config, "rp_bts_url")
+        self.rp_launch = self.find_option(pytest_config, "rp_launch")
+        self.rp_launch_id = self.find_option(pytest_config, "rp_launch_id")
+        self.rp_launch_attributes = self.find_option(pytest_config, "rp_launch_attributes")
+        self.rp_launch_description = self.find_option(pytest_config, "rp_launch_description")
+        self.rp_log_batch_size = int(self.find_option(pytest_config, "rp_log_batch_size"))
+        batch_payload_size = self.find_option(pytest_config, "rp_log_batch_payload_size")
         if batch_payload_size:
             self.rp_log_batch_payload_size = int(batch_payload_size)
         else:
             self.rp_log_batch_payload_size = MAX_LOG_BATCH_PAYLOAD_SIZE
-        self.rp_log_level = get_actual_log_level(pytest_config, 'rp_log_level')
-        self.rp_log_format = self.find_option(pytest_config, 'rp_log_format')
-        self.rp_thread_logging = to_bool(self.find_option(pytest_config, 'rp_thread_logging') or False)
-        self.rp_mode = self.find_option(pytest_config, 'rp_mode')
-        self.rp_parent_item_id = self.find_option(pytest_config, 'rp_parent_item_id')
-        self.rp_project = self.find_option(pytest_config, 'rp_project')
-        self.rp_rerun_of = self.find_option(pytest_config, 'rp_rerun_of')
-        self.rp_skip_connection_test = to_bool(self.find_option(pytest_config, 'rp_skip_connection_test'))
-
-        rp_api_retries_str = self.find_option(pytest_config, 'rp_api_retries')
+        self.rp_log_level = get_actual_log_level(pytest_config, "rp_log_level")
+        self.rp_log_format = self.find_option(pytest_config, "rp_log_format")
+        self.rp_thread_logging = to_bool(self.find_option(pytest_config, "rp_thread_logging") or False)
+        self.rp_mode = self.find_option(pytest_config, "rp_mode")
+        self.rp_parent_item_id = self.find_option(pytest_config, "rp_parent_item_id")
+        self.rp_project = self.find_option(pytest_config, "rp_project")
+        self.rp_rerun_of = self.find_option(pytest_config, "rp_rerun_of")
+        self.rp_skip_connection_test = to_bool(self.find_option(pytest_config, "rp_skip_connection_test"))
+
+        rp_api_retries_str = self.find_option(pytest_config, "rp_api_retries")
         rp_api_retries = rp_api_retries_str and int(rp_api_retries_str)
        if rp_api_retries and rp_api_retries > 0:
             self.rp_api_retries = rp_api_retries
         else:
-            rp_api_retries_str = self.find_option(pytest_config, 'retries')
+            rp_api_retries_str = self.find_option(pytest_config, "retries")
             rp_api_retries = rp_api_retries_str and int(rp_api_retries_str)
             if rp_api_retries and rp_api_retries > 0:
                 self.rp_api_retries = rp_api_retries
                 warnings.warn(
-                    'Parameter `retries` is deprecated since 5.1.9 '
-                    'and will be subject for removing in the next '
-                    'major version. Use `rp_api_retries` argument '
-                    'instead.',
+                    "Parameter `retries` is deprecated since 5.1.9 "
+                    "and will be subject for removal in the next "
+                    "major version. Use `rp_api_retries` argument "
+                    "instead.",
                     DeprecationWarning,
-                    2
+                    2,
                 )
             else:
                 self.rp_api_retries = 0

-        self.rp_api_key = getenv(
-            'RP_API_KEY') or self.find_option(pytest_config, 'rp_api_key')
+        self.rp_api_key = getenv("RP_API_KEY") or self.find_option(pytest_config, "rp_api_key")
         if not self.rp_api_key:
-            self.rp_api_key = getenv(
-                'RP_UUID') or self.find_option(pytest_config, 'rp_uuid')
+            self.rp_api_key = getenv("RP_UUID") or self.find_option(pytest_config, "rp_uuid")
             if self.rp_api_key:
                 warnings.warn(
-                    'Parameter `rp_uuid` is deprecated since 5.1.9 '
-                    'and will be subject for removing in the next '
-                    'major version. Use `rp_api_key` argument '
-                    'instead.',
+                    "Parameter `rp_uuid` is deprecated since 5.1.9 "
+                    "and will be subject for removal in the next "
+                    "major version. Use `rp_api_key` argument "
+                    "instead.",
                     DeprecationWarning,
-                    2
+                    2,
                 )
             else:
                 warnings.warn(
-                    'Argument `rp_api_key` is `None` or empty string, '
-                    'that is not supposed to happen because Report '
-                    'Portal is usually requires an authorization key. '
-                    'Please check your configuration.',
+                    "Argument `rp_api_key` is `None` or empty string, "
+                    "which is not supposed to happen because Report "
+                    "Portal usually requires an authorization key. 
" + "Please check your configuration.", RuntimeWarning, - 2 + 2, ) - rp_verify_ssl = self.find_option(pytest_config, 'rp_verify_ssl', True) + rp_verify_ssl = self.find_option(pytest_config, "rp_verify_ssl", True) try: self.rp_verify_ssl = to_bool(rp_verify_ssl) except (ValueError, AttributeError): self.rp_verify_ssl = rp_verify_ssl - self.rp_launch_timeout = int(self.find_option(pytest_config, 'rp_launch_timeout')) + self.rp_launch_timeout = int(self.find_option(pytest_config, "rp_launch_timeout")) - self.rp_launch_uuid_print = to_bool(self.find_option(pytest_config, 'rp_launch_uuid_print') or 'False') - print_output = self.find_option(pytest_config, 'rp_launch_uuid_print_output') + self.rp_launch_uuid_print = to_bool(self.find_option(pytest_config, "rp_launch_uuid_print") or "False") + print_output = self.find_option(pytest_config, "rp_launch_uuid_print_output") self.rp_launch_uuid_print_output = OutputType[print_output.upper()] if print_output else None - client_type = self.find_option(pytest_config, 'rp_client_type') + client_type = self.find_option(pytest_config, "rp_client_type") self.rp_client_type = ClientType[client_type.upper()] if client_type else ClientType.SYNC - connect_timeout = self.find_option(pytest_config, 'rp_connect_timeout') + connect_timeout = self.find_option(pytest_config, "rp_connect_timeout") connect_timeout = float(connect_timeout) if connect_timeout else None - read_timeout = self.find_option(pytest_config, 'rp_read_timeout') + read_timeout = self.find_option(pytest_config, "rp_read_timeout") read_timeout = float(read_timeout) if read_timeout else None if connect_timeout is None and read_timeout is None: self.rp_http_timeout = None @@ -180,7 +177,7 @@ def __init__(self, pytest_config: Config) -> None: self.rp_http_timeout = (connect_timeout, read_timeout) else: self.rp_http_timeout = connect_timeout or read_timeout - self.rp_report_fixtures = to_bool(self.find_option(pytest_config, 'rp_report_fixtures', False)) + self.rp_report_fixtures = to_bool(self.find_option(pytest_config, "rp_report_fixtures", False)) # noinspection PyMethodMayBeStatic def find_option(self, pytest_config: Config, option_name: str, default: Any = None) -> Any: @@ -197,7 +194,7 @@ def find_option(self, pytest_config: Config, option_name: str, default: Any = No :param default: value to be returned if not found :return: option value """ - value = (getattr(pytest_config.option, option_name, None) or pytest_config.getini(option_name)) + value = getattr(pytest_config.option, option_name, None) or pytest_config.getini(option_name) if isinstance(value, bool): return value return value or default diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index d28c820..c36c2f7 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -22,10 +22,11 @@ import _pytest.logging import dill as pickle import pytest + # noinspection PyPackageRequirements import requests -from pytest import Session, Item -from reportportal_client import RPLogHandler, RP +from pytest import Item, Session +from reportportal_client import RP, RPLogHandler from reportportal_client.errors import ResponseError from reportportal_client.logs import MAX_LOG_BATCH_PAYLOAD_SIZE @@ -37,21 +38,25 @@ try: # noinspection PyPackageRequirements from pytest_bdd import given # noqa: F401 + PYTEST_BDD = True except ImportError: PYTEST_BDD = False log: Logger = logging.getLogger(__name__) -MANDATORY_PARAMETER_MISSED_PATTERN: str = \ - 'One of the following mandatory parameters is unset: ' + \ - 
'rp_project: {}, ' + \
-    'rp_endpoint: {}, ' + \
-    'rp_api_key: {}'
+MANDATORY_PARAMETER_MISSED_PATTERN: str = (
+    "One of the following mandatory parameters is unset: "
+    + "rp_project: {}, "
+    + "rp_endpoint: {}, "
+    + "rp_api_key: {}"
+)

-FAILED_LAUNCH_WAIT: str = 'Failed to initialize reportportal-client service. ' \
-    + 'Waiting for Launch start timed out. ' \
-    + 'Reporting is disabled.'
+FAILED_LAUNCH_WAIT: str = (
+    "Failed to initialize reportportal-client service. "
+    + "Waiting for Launch start timed out. "
+    + "Reporting is disabled."
+)


 @pytest.hookimpl(optionalhook=True)
@@ -64,7 +69,7 @@ def pytest_configure_node(node: Any) -> None:
     if not node.config._rp_enabled:
         # Stop now if the plugin is not properly configured
         return
-    node.workerinput['py_test_service'] = pickle.dumps(node.config.py_test_service)
+    node.workerinput["py_test_service"] = pickle.dumps(node.config.py_test_service)


 # no 'config' type for backward compatibility for older pytest versions
@@ -74,7 +79,7 @@ def is_control(config) -> bool:
     True if the code, running the given pytest.config object,
     is running as the xdist control node or not running xdist at all.
     """
-    return not hasattr(config, 'workerinput')
+    return not hasattr(config, "workerinput")


 def wait_launch(rp_client: RP) -> bool:
@@ -106,8 +111,7 @@ def pytest_sessionstart(session: Session) -> None:
         try:
             config.py_test_service.start()
         except ResponseError as response_error:
-            log.warning('Failed to initialize reportportal-client service. '
-                        'Reporting is disabled.')
+            log.warning("Failed to initialize reportportal-client service. " "Reporting is disabled.")
             log.debug(str(response_error))
             config.py_test_service.rp = None
             config._rp_enabled = False
@@ -115,8 +119,7 @@ def pytest_sessionstart(session: Session) -> None:
         if is_control(config):
             config.py_test_service.start_launch()
-            if config.pluginmanager.hasplugin('xdist') \
-                    or config.pluginmanager.hasplugin('pytest-parallel'):
+            if config.pluginmanager.hasplugin("xdist") or config.pluginmanager.hasplugin("pytest-parallel"):
                 if not wait_launch(session.config.py_test_service.rp):
                     log.error(FAILED_LAUNCH_WAIT)
                     config.py_test_service.rp = None
@@ -161,20 +164,19 @@ def register_markers(config) -> None:
     :param config: Object of the pytest Config class
     """
     config.addinivalue_line(
-        "markers", "issue(issue_id, reason, issue_type, url): mark test with "
-                   "information about skipped or failed result"
+        "markers",
+        "issue(issue_id, reason, issue_type, url): mark test with " "information about skipped or failed result",
     )
     config.addinivalue_line(
-        "markers", "tc_id(id, parameterized, params): report the test"
-                   "case with a custom Test Case ID. Parameters: \n"
-                   "parameterized [True / False] - use parameter values in "
-                   "Test Case ID generation \n"
-                   "params [parameter names as list] - use only specified"
-                   "parameters"
-    )
-    config.addinivalue_line(
-        "markers", "name(name): report the test case with a custom Name."
+        "markers",
+        "tc_id(id, parameterized, params): report the test "
+        "case with a custom Test Case ID. Parameters: \n"
+        "parameterized [True / False] - use parameter values in "
+        "Test Case ID generation \n"
+        "params [parameter names as list] - use only specified "
+        "parameters",
     )
+    config.addinivalue_line("markers", "name(name): report the test case with a custom Name.")


 def check_connection(agent_config: AgentConfig):
@@ -184,8 +186,8 @@ def check_connection(agent_config: AgentConfig):
     :param agent_config: Instance of the AgentConfig class
     :return True on successful connection check, otherwise False
     """
-    url = '{0}/api/v1/project/{1}'.format(agent_config.rp_endpoint, agent_config.rp_project)
-    headers = {'Authorization': 'bearer {0}'.format(agent_config.rp_api_key)}
+    url = "{0}/api/v1/project/{1}".format(agent_config.rp_endpoint, agent_config.rp_project)
+    headers = {"Authorization": "bearer {0}".format(agent_config.rp_api_key)}
     try:
         resp = requests.get(url, headers=headers, verify=agent_config.rp_verify_ssl)
         resp.raise_for_status()
@@ -206,28 +208,27 @@ def pytest_configure(config) -> None:
     register_markers(config)

     config._rp_enabled = not (
-        config.getoption('--collect-only', default=False) or
-        config.getoption('--setup-plan', default=False) or
-        not config.option.rp_enabled)
+        config.getoption("--collect-only", default=False)
+        or config.getoption("--setup-plan", default=False)
+        or not config.option.rp_enabled
+    )
     if not config._rp_enabled:
         return

     agent_config = AgentConfig(config)
-    cond = (agent_config.rp_project, agent_config.rp_endpoint,
-            agent_config.rp_api_key)
+    cond = (agent_config.rp_project, agent_config.rp_endpoint, agent_config.rp_api_key)
     config._rp_enabled = all(cond)
     if not config._rp_enabled:
         log.debug(MANDATORY_PARAMETER_MISSED_PATTERN.format(*cond))
-        log.debug('Disabling reporting to RP.')
+        log.debug("Disabling reporting to RP.")
         return

     if not agent_config.rp_skip_connection_test:
         config._rp_enabled = check_connection(agent_config)
         if not config._rp_enabled:
-            log.debug('Failed to establish connection with RP. '
-                      'Disabling reporting.')
+            log.debug("Failed to establish connection with RP. 
" "Disabling reporting.") return config._reporter_config = agent_config @@ -236,7 +237,7 @@ def pytest_configure(config) -> None: config.py_test_service = PyTestServiceClass(agent_config) else: # noinspection PyUnresolvedReferences - config.py_test_service = pickle.loads(config.workerinput['py_test_service']) + config.py_test_service = pickle.loads(config.workerinput["py_test_service"]) # noinspection PyProtectedMember @@ -276,8 +277,11 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: service.start_pytest_item(item) log_level = agent_config.rp_log_level or logging.NOTSET log_handler = RPLogHandler( - level=log_level, filter_client_logs=True, endpoint=agent_config.rp_endpoint, - ignored_record_names=('reportportal_client', 'pytest_reportportal')) + level=log_level, + filter_client_logs=True, + endpoint=agent_config.rp_endpoint, + ignored_record_names=("reportportal_client", "pytest_reportportal"), + ) log_format = agent_config.rp_log_format if log_format: log_handler.setFormatter(logging.Formatter(log_format)) @@ -312,18 +316,18 @@ def report_fixture(request, fixturedef, name: str, error_msg: str) -> Generator[ :param error_msg: Error message """ config = request.config - enabled = getattr(config, '_rp_enabled', False) - service = getattr(config, 'py_test_service', None) - agent_config = getattr(config, '_reporter_config', object()) - report_fixtures = getattr(agent_config, 'rp_report_fixtures', False) + enabled = getattr(config, "_rp_enabled", False) + service = getattr(config, "py_test_service", None) + agent_config = getattr(config, "_reporter_config", object()) + report_fixtures = getattr(agent_config, "rp_report_fixtures", False) if not enabled or not service or not report_fixtures: yield return - cached_result = getattr(fixturedef, 'cached_result', None) - if cached_result and hasattr(cached_result, '__getitem__'): + cached_result = getattr(fixturedef, "cached_result", None) + if cached_result and hasattr(cached_result, "__getitem__"): result = fixturedef.cached_result[2] - if hasattr(result, '__getitem__'): + if hasattr(result, "__getitem__"): result = result[0] if result and isinstance(result, BaseException): yield @@ -341,8 +345,11 @@ def pytest_fixture_setup(fixturedef, request) -> Generator[None, Any, None]: :param request: represents fixture execution metadata """ yield from report_fixture( - request, fixturedef, f'{fixturedef.scope} fixture setup: {fixturedef.argname}', - f'{fixturedef.scope} fixture setup failed: {fixturedef.argname}') + request, + fixturedef, + f"{fixturedef.scope} fixture setup: {fixturedef.argname}", + f"{fixturedef.scope} fixture setup failed: {fixturedef.argname}", + ) # no types for backward compatibility for older pytest versions @@ -354,8 +361,11 @@ def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[None, Any, N :param request: represents fixture execution metadata """ yield from report_fixture( - request, fixturedef, f'{fixturedef.scope} fixture teardown: {fixturedef.argname}', - f'{fixturedef.scope} fixture teardown failed: {fixturedef.argname}') + request, + fixturedef, + f"{fixturedef.scope} fixture teardown: {fixturedef.argname}", + f"{fixturedef.scope} fixture teardown failed: {fixturedef.argname}", + ) # no types for backward compatibility for older pytest versions @@ -364,9 +374,9 @@ def pytest_addoption(parser) -> None: :param parser: Object of the Parser class """ - group = parser.getgroup('reporting') + group = parser.getgroup("reporting") - def add_shared_option(name, help_str, default=None, 
action='store'): + def add_shared_option(name, help_str, default=None, action="store"): """ Add an option to both the command line and the .ini file. @@ -383,208 +393,149 @@ def add_shared_option(name, help_str, default=None, action='store'): help=help_str, ) group.addoption( - '--{0}'.format(name.replace('_', '-')), + "--{0}".format(name.replace("_", "-")), action=action, dest=name, - help='{help} (overrides {name} config option)'.format( + help="{help} (overrides {name} config option)".format( help=help_str, name=name, ), ) group.addoption( - '--reportportal', - action='store_true', - dest='rp_enabled', - default=False, - help='Enable ReportPortal plugin' + "--reportportal", action="store_true", dest="rp_enabled", default=False, help="Enable ReportPortal plugin" ) add_shared_option( - name='rp_launch', - help_str='Launch name', - default='Pytest Launch', + name="rp_launch", + help_str="Launch name", + default="Pytest Launch", ) add_shared_option( - name='rp_launch_id', - help_str='Use already existing launch-id. The plugin won\'t control ' - 'the Launch status', + name="rp_launch_id", + help_str="Use already existing launch-id. The plugin won't control " "the Launch status", ) add_shared_option( - name='rp_launch_description', - help_str='Launch description', - default='', + name="rp_launch_description", + help_str="Launch description", + default="", ) - add_shared_option(name='rp_project', help_str='Project name') + add_shared_option(name="rp_project", help_str="Project name") add_shared_option( - name='rp_log_level', - help_str='Logging level for automated log records reporting', + name="rp_log_level", + help_str="Logging level for automated log records reporting", ) add_shared_option( - name='rp_log_format', - help_str='Logging format for automated log records reporting', + name="rp_log_format", + help_str="Logging format for automated log records reporting", ) add_shared_option( - name='rp_rerun', - help_str='Marks the launch as a rerun', + name="rp_rerun", + help_str="Marks the launch as a rerun", default=False, - action='store_true', + action="store_true", ) add_shared_option( - name='rp_rerun_of', - help_str='ID of the launch to be marked as a rerun (use only with ' - 'rp_rerun=True)', - default='', + name="rp_rerun_of", + help_str="ID of the launch to be marked as a rerun (use only with " "rp_rerun=True)", + default="", ) add_shared_option( - name='rp_parent_item_id', - help_str='Create all test item as child items of the given (already ' - 'existing) item.', + name="rp_parent_item_id", + help_str="Create all test item as child items of the given (already " "existing) item.", ) - add_shared_option(name='rp_uuid', help_str='Deprecated: use `rp_api_key` ' - 'instead.') + add_shared_option(name="rp_uuid", help_str="Deprecated: use `rp_api_key` " "instead.") + add_shared_option(name="rp_api_key", help_str="API key of Report Portal. Usually located on UI profile " "page.") + add_shared_option(name="rp_endpoint", help_str="Server endpoint") + add_shared_option(name="rp_mode", help_str="Visibility of current launch [DEFAULT, DEBUG]", default="DEFAULT") add_shared_option( - name='rp_api_key', - help_str='API key of Report Portal. Usually located on UI profile ' - 'page.' - ) - add_shared_option(name='rp_endpoint', help_str='Server endpoint') - add_shared_option( - name='rp_mode', - help_str='Visibility of current launch [DEFAULT, DEBUG]', - default='DEFAULT' - ) - add_shared_option( - name='rp_thread_logging', - help_str='EXPERIMENTAL: Report logs from threads. 
' - 'This option applies a patch to the builtin Thread class, ' - 'and so it is turned off by default. Use with caution.', + name="rp_thread_logging", + help_str="EXPERIMENTAL: Report logs from threads. " + "This option applies a patch to the builtin Thread class, " + "and so it is turned off by default. Use with caution.", default=False, - action='store_true' + action="store_true", ) add_shared_option( - name='rp_launch_uuid_print', - help_str='Enables printing Launch UUID on test run start. Possible values: [True, False]' + name="rp_launch_uuid_print", + help_str="Enables printing Launch UUID on test run start. Possible values: [True, False]", ) add_shared_option( - name='rp_launch_uuid_print_output', - help_str='Launch UUID print output. Default `stdout`. Possible values: [stderr, stdout]' + name="rp_launch_uuid_print_output", + help_str="Launch UUID print output. Default `stdout`. Possible values: [stderr, stdout]", ) + parser.addini("rp_launch_attributes", type="args", help="Launch attributes, i.e Performance Regression") + parser.addini("rp_tests_attributes", type="args", help="Attributes for all tests items, e.g. Smoke") + parser.addini("rp_log_batch_size", default="20", help="Size of batch log requests in async mode") parser.addini( - 'rp_launch_attributes', - type='args', - help='Launch attributes, i.e Performance Regression') - parser.addini( - 'rp_tests_attributes', - type='args', - help='Attributes for all tests items, e.g. Smoke') - parser.addini( - 'rp_log_batch_size', - default='20', - help='Size of batch log requests in async mode') - parser.addini( - 'rp_log_batch_payload_size', + "rp_log_batch_payload_size", default=str(MAX_LOG_BATCH_PAYLOAD_SIZE), - help='Maximum payload size in bytes of async batch log requests') - parser.addini( - 'rp_ignore_attributes', - type='args', - help='Ignore specified pytest markers, i.e parametrize') - parser.addini( - 'rp_is_skipped_an_issue', - default=True, - type='bool', - help='Treat skipped tests as required investigation') - parser.addini( - 'rp_hierarchy_code', - default=False, - type='bool', - help='Enables hierarchy for code') - parser.addini( - 'rp_hierarchy_dirs_level', - default='0', - help='Directory starting hierarchy level') + help="Maximum payload size in bytes of async batch log requests", + ) + parser.addini("rp_ignore_attributes", type="args", help="Ignore specified pytest markers, i.e parametrize") parser.addini( - 'rp_hierarchy_dirs', - default=False, - type='bool', - help='Enables hierarchy for directories') + "rp_is_skipped_an_issue", default=True, type="bool", help="Treat skipped tests as required investigation" + ) + parser.addini("rp_hierarchy_code", default=False, type="bool", help="Enables hierarchy for code") + parser.addini("rp_hierarchy_dirs_level", default="0", help="Directory starting hierarchy level") + parser.addini("rp_hierarchy_dirs", default=False, type="bool", help="Enables hierarchy for directories") parser.addini( - 'rp_hierarchy_dir_path_separator', + "rp_hierarchy_dir_path_separator", default=os.path.sep, - help='Path separator to display directories in test hierarchy') - parser.addini( - 'rp_hierarchy_test_file', - default=True, - type='bool', - help='Show file name in hierarchy') - parser.addini( - 'rp_issue_system_url', - default='', - help='URL to get issue description. Issue id from pytest mark will be added to this URL. ' - 'Deprecated: use "rp_bts_issue_url".') - parser.addini( - 'rp_bts_issue_url', - default='', - help='URL to get issue description. 
Issue ID from pytest mark will be added to this URL by replacing ' - '"{issue_id}" placeholder.') - parser.addini( - 'rp_bts_project', - default='', - help='Bug-tracking system project as it configured on Report Portal ' - 'server. To enable runtime external issue reporting you need to ' - 'specify this and "rp_bts_url" property.') - parser.addini( - 'rp_bts_url', - default='', - help='URL of bug-tracking system as it configured on Report Portal ' - 'server. To enable runtime external issue reporting you need to ' - 'specify this and "rp_bts_project" property.') - parser.addini( - 'rp_verify_ssl', - default='True', - help='True/False - verify HTTPS calls, or path to a CA_BUNDLE or ' - 'directory with certificates of trusted CAs.') - parser.addini( - 'rp_issue_id_marks', - type='bool', - default=True, - help='Add tag with issue id to the test') + help="Path separator to display directories in test hierarchy", + ) + parser.addini("rp_hierarchy_test_file", default=True, type="bool", help="Show file name in hierarchy") parser.addini( - 'retries', - default='0', - help='Deprecated: use `rp_api_retries` instead') + "rp_issue_system_url", + default="", + help="URL to get issue description. Issue id from pytest mark will be added to this URL. " + 'Deprecated: use "rp_bts_issue_url".', + ) parser.addini( - 'rp_api_retries', - default='0', - help='Amount of retries for performing REST calls to RP server') + "rp_bts_issue_url", + default="", + help="URL to get issue description. Issue ID from pytest mark will be added to this URL by replacing " + '"{issue_id}" placeholder.', + ) parser.addini( - 'rp_skip_connection_test', - default=False, - type='bool', - help='Skip Report Portal connection test') + "rp_bts_project", + default="", + help="Bug-tracking system project as it configured on Report Portal " + "server. To enable runtime external issue reporting you need to " + 'specify this and "rp_bts_url" property.', + ) parser.addini( - 'rp_launch_timeout', - default=86400, - help='Maximum time to wait for child processes finish, default value: ' - '86400 seconds (1 day)' + "rp_bts_url", + default="", + help="URL of bug-tracking system as it configured on Report Portal " + "server. To enable runtime external issue reporting you need to " + 'specify this and "rp_bts_project" property.', ) parser.addini( - 'rp_client_type', - help='Type of the under-the-hood ReportPortal client implementation. Possible values: [SYNC, ASYNC_THREAD, ' - 'ASYNC_BATCHED]' + "rp_verify_ssl", + default="True", + help="True/False - verify HTTPS calls, or path to a CA_BUNDLE or " + "directory with certificates of trusted CAs.", ) + parser.addini("rp_issue_id_marks", type="bool", default=True, help="Add tag with issue id to the test") + parser.addini("retries", default="0", help="Deprecated: use `rp_api_retries` instead") + parser.addini("rp_api_retries", default="0", help="Amount of retries for performing REST calls to RP server") + parser.addini("rp_skip_connection_test", default=False, type="bool", help="Skip Report Portal connection test") parser.addini( - 'rp_connect_timeout', - help='Connection timeout to ReportPortal server' + "rp_launch_timeout", + default=86400, + help="Maximum time to wait for child processes finish, default value: " "86400 seconds (1 day)", ) parser.addini( - 'rp_read_timeout', - help='Response read timeout for ReportPortal connection' + "rp_client_type", + help="Type of the under-the-hood ReportPortal client implementation. 
Possible values: [SYNC, ASYNC_THREAD, " + "ASYNC_BATCHED]", ) + parser.addini("rp_connect_timeout", help="Connection timeout to ReportPortal server") + parser.addini("rp_read_timeout", help="Response read timeout for ReportPortal connection") parser.addini( - 'rp_report_fixtures', + "rp_report_fixtures", default=False, - type='bool', - help='Enable reporting fixtures as test items. Possible values: [True, False]' + type="bool", + help="Enable reporting fixtures as test items. Possible values: [True, False]", ) diff --git a/pytest_reportportal/rp_logging.py b/pytest_reportportal/rp_logging.py index 650082f..d6ce9a0 100644 --- a/pytest_reportportal/rp_logging.py +++ b/pytest_reportportal/rp_logging.py @@ -13,25 +13,24 @@ """RPLogger class for low-level logging in tests.""" -import sys import logging +import sys import threading from contextlib import contextmanager from functools import wraps from typing import Any -from reportportal_client import current, set_current -from reportportal_client import RPLogger +from reportportal_client import RPLogger, current, set_current from reportportal_client.core.worker import APIWorker def is_api_worker(target): """Check if target is an RP worker thread.""" if target: - method_name = getattr(target, '__name__', None) - method_self = getattr(target, '__self__', None) - if method_name == '_monitor' and method_self: - clazz = getattr(method_self, '__class__', None) + method_name = getattr(target, "__name__", None) + method_self = getattr(target, "__self__", None) + if method_name == "_monitor" and method_self: + clazz = getattr(method_self, "__class__", None) if clazz is APIWorker: return True return False @@ -51,12 +50,13 @@ def patching_thread_class(config): original_start = threading.Thread.start original_run = threading.Thread.run try: + def wrap_start(original_func): @wraps(original_func) def _start(self, *args, **kwargs): """Save the invoking thread's client if there is one.""" # Prevent an endless loop of workers being spawned - target = getattr(self, '_target', None) + target = getattr(self, "_target", None) if not is_api_worker(self) and not is_api_worker(target): current_client = current() self.parent_rp_client = current_client @@ -69,11 +69,7 @@ def wrap_run(original_func): def _run(self, *args, **kwargs): """Create a new client for the invoked thread.""" client = None - if ( - hasattr(self, "parent_rp_client") - and self.parent_rp_client - and not current() - ): + if hasattr(self, "parent_rp_client") and self.parent_rp_client and not current(): parent = self.parent_rp_client client = parent.clone() try: @@ -115,44 +111,43 @@ def patching_logger_class(): original_makeRecord = logger_class.makeRecord try: + def wrap_log(original_func): @wraps(original_func) def _log(self, *args: list[Any], **kwargs: dict[str, Any]): my_kwargs = kwargs.copy() - attachment = my_kwargs.pop('attachment', None) + attachment = my_kwargs.pop("attachment", None) if attachment is not None: - my_kwargs.setdefault('extra', {}).update({'attachment': attachment}) + my_kwargs.setdefault("extra", {}).update({"attachment": attachment}) # Python 3.11 start catches stack frames in wrappers, # so add additional stack level skip to not show it if sys.version_info >= (3, 11): - if 'stacklevel' in my_kwargs: - my_kwargs['stacklevel'] = my_kwargs['stacklevel'] + 1 + if "stacklevel" in my_kwargs: + my_kwargs["stacklevel"] = my_kwargs["stacklevel"] + 1 else: - my_kwargs['stacklevel'] = 2 + my_kwargs["stacklevel"] = 2 return original_func(self, *args, **my_kwargs) else: return 
original_func(self, *args, **my_kwargs) + return _log def wrap_makeRecord(original_func): @wraps(original_func) - def makeRecord(self, name, level, fn, lno, msg, args, exc_info, - func=None, extra=None, sinfo=None): + def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): if extra is not None: - attachment = extra.pop('attachment', None) + attachment = extra.pop("attachment", None) else: attachment = None try: # Python 3.5 - record = original_func(self, name, level, fn, lno, msg, - args, exc_info, func=func, - extra=extra, sinfo=sinfo) + record = original_func( + self, name, level, fn, lno, msg, args, exc_info, func=func, extra=extra, sinfo=sinfo + ) except TypeError: # Python 2.7 - record = original_func(self, name, level, fn, lno, msg, - args, exc_info, func=func, - extra=extra) + record = original_func(self, name, level, fn, lno, msg, args, exc_info, func=func, extra=extra) record.attachment = attachment return record diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 6a13d86..6dc5605 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -19,15 +19,15 @@ import threading from functools import wraps from os import curdir -from time import time, sleep -from typing import List, Any, Optional, Set, Dict, Tuple, Union, Callable, Generator +from time import sleep, time +from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union from _pytest.doctest import DoctestItem -from aenum import auto, Enum, unique +from aenum import Enum, auto, unique from py.path import local -from pytest import Class, Function, Module, Package, Item, Session, PytestWarning +from pytest import Class, Function, Item, Module, Package, PytestWarning, Session from reportportal_client.aio import Task -from reportportal_client.core.rp_issues import Issue, ExternalIssue +from reportportal_client.core.rp_issues import ExternalIssue, Issue from reportportal_client.helpers import timestamp from .config import AgentConfig @@ -37,30 +37,25 @@ from pytest import Instance except ImportError: # in pytest >= 7.0 this type was removed - Instance = type('dummy', (), {}) + Instance = type("dummy", (), {}) try: from pytest import Dir except ImportError: # in pytest < 8.0 there is no such type - Dir = type('dummy', (), {}) + Dir = type("dummy", (), {}) from reportportal_client import RP, create_client -from reportportal_client.helpers import ( - dict_to_payload, - gen_attributes, - get_launch_sys_attrs, - get_package_version -) +from reportportal_client.helpers import dict_to_payload, gen_attributes, get_launch_sys_attrs, get_package_version LOGGER = logging.getLogger(__name__) MAX_ITEM_NAME_LENGTH: int = 1024 -TRUNCATION_STR: str = '...' +TRUNCATION_STR: str = "..." 
ROOT_DIR: str = str(os.path.abspath(curdir)) -PYTEST_MARKS_IGNORE: Set[str] = {'parametrize', 'usefixtures', 'filterwarnings'} -NOT_ISSUE: Issue = Issue('NOT_ISSUE') -ISSUE_DESCRIPTION_LINE_TEMPLATE: str = '* {}:{}' -ISSUE_DESCRIPTION_URL_TEMPLATE: str = ' [{issue_id}]({url})' -ISSUE_DESCRIPTION_ID_TEMPLATE: str = ' {issue_id}' +PYTEST_MARKS_IGNORE: Set[str] = {"parametrize", "usefixtures", "filterwarnings"} +NOT_ISSUE: Issue = Issue("NOT_ISSUE") +ISSUE_DESCRIPTION_LINE_TEMPLATE: str = "* {}:{}" +ISSUE_DESCRIPTION_URL_TEMPLATE: str = " [{issue_id}]({url})" +ISSUE_DESCRIPTION_ID_TEMPLATE: str = " {issue_id}" def trim_docstring(docstring: str) -> str: @@ -71,7 +66,7 @@ def trim_docstring(docstring: str) -> str: :return: trimmed docstring """ if not docstring: - return '' + return "" # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() @@ -92,7 +87,7 @@ def trim_docstring(docstring: str) -> str: while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: - return '\n'.join(trimmed) + return "\n".join(trimmed) @unique @@ -148,10 +143,10 @@ def __init__(self, agent_config: AgentConfig) -> None: self._config = agent_config self._issue_types = {} self._tree_path = {} - self._log_levels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR') + self._log_levels = ("TRACE", "DEBUG", "INFO", "WARN", "ERROR") self._start_tracker = set() self._launch_id = None - self.agent_name = 'pytest-reportportal' + self.agent_name = "pytest-reportportal" self.agent_version = get_package_version(self.agent_name) self.ignored_attributes = [] self.parent_item_id = None @@ -182,8 +177,7 @@ def _get_launch_attributes(self, ini_attrs: Optional[List[Dict[str, str]]]) -> L """ attributes = ini_attrs or [] system_attributes = get_launch_sys_attrs() - system_attributes['agent'] = ( - '{}|{}'.format(self.agent_name, self.agent_version)) + system_attributes["agent"] = "{}|{}".format(self.agent_name, self.agent_version) return attributes + dict_to_payload(system_attributes) def _build_start_launch_rq(self) -> Dict[str, Any]: @@ -191,12 +185,12 @@ def _build_start_launch_rq(self) -> Dict[str, Any]: attributes = gen_attributes(rp_launch_attributes) if rp_launch_attributes else None start_rq = { - 'attributes': self._get_launch_attributes(attributes), - 'name': self._config.rp_launch, - 'start_time': timestamp(), - 'description': self._config.rp_launch_description, - 'rerun': self._config.rp_rerun, - 'rerun_of': self._config.rp_rerun_of + "attributes": self._get_launch_attributes(attributes), + "name": self._config.rp_launch, + "start_time": timestamp(), + "description": self._config.rp_launch_description, + "rerun": self._config.rp_rerun, + "rerun_of": self._config.rp_rerun_of, } return start_rq @@ -208,9 +202,9 @@ def start_launch(self) -> Optional[str]: :return: item ID """ sl_pt = self._build_start_launch_rq() - LOGGER.debug('ReportPortal - Start launch: request_body=%s', sl_pt) + LOGGER.debug("ReportPortal - Start launch: request_body=%s", sl_pt) self._launch_id = self.rp.start_launch(**sl_pt) - LOGGER.debug('ReportPortal - Launch started: id=%s', self._launch_id) + LOGGER.debug("ReportPortal - Launch started: id=%s", self._launch_id) return self._launch_id def _get_item_dirs(self, item: Item) -> List[local]: @@ -241,8 +235,9 @@ def _get_tree_path(self, item: Item) -> List[Item]: path.reverse() return path - def _get_leaf(self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Optional[Item], - item_id: Optional[str] = None) -> 
Dict[str, Any]: + def _get_leaf( + self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Optional[Item], item_id: Optional[str] = None + ) -> Dict[str, Any]: """Construct a leaf for the itest tree. :param leaf_type: the leaf type @@ -251,9 +246,13 @@ def _get_leaf(self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Opti :return: a leaf """ return { - 'children': {}, 'type': leaf_type, 'item': item, - 'parent': parent_item, 'lock': threading.Lock(), - 'exec': ExecStatus.CREATED, 'item_id': item_id + "children": {}, + "type": leaf_type, + "item": item, + "parent": parent_item, + "lock": threading.Lock(), + "exec": ExecStatus.CREATED, + "item_id": item_id, } def _build_test_tree(self, session: Session) -> Dict[str, Any]: @@ -270,7 +269,7 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: current_leaf = test_tree for i, leaf in enumerate(dir_path + class_path): - children_leafs = current_leaf['children'] + children_leafs = current_leaf["children"] leaf_type = LeafType.DIR if i == len(dir_path): @@ -284,83 +283,83 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: return test_tree def _remove_root_dirs(self, test_tree: Dict[str, Any], max_dir_level: int, dir_level: int = 0) -> None: - if test_tree['type'] == LeafType.ROOT: - items = list(test_tree['children'].items()) + if test_tree["type"] == LeafType.ROOT: + items = list(test_tree["children"].items()) for item, child_leaf in items: self._remove_root_dirs(child_leaf, max_dir_level, 1) return - if test_tree['type'] == LeafType.DIR and dir_level <= max_dir_level: + if test_tree["type"] == LeafType.DIR and dir_level <= max_dir_level: new_level = dir_level + 1 - parent_leaf = test_tree['parent'] - current_item = test_tree['item'] - del parent_leaf['children'][current_item] - for item, child_leaf in test_tree['children'].items(): - parent_leaf['children'][item] = child_leaf - child_leaf['parent'] = parent_leaf + parent_leaf = test_tree["parent"] + current_item = test_tree["item"] + del parent_leaf["children"][current_item] + for item, child_leaf in test_tree["children"].items(): + parent_leaf["children"][item] = child_leaf + child_leaf["parent"] = parent_leaf self._remove_root_dirs(child_leaf, max_dir_level, new_level) def _remove_file_names(self, test_tree: Dict[str, Any]) -> None: - if test_tree['type'] != LeafType.FILE: - items = list(test_tree['children'].items()) + if test_tree["type"] != LeafType.FILE: + items = list(test_tree["children"].items()) for item, child_leaf in items: self._remove_file_names(child_leaf) return if not self._config.rp_hierarchy_test_file: - parent_leaf = test_tree['parent'] - current_item = test_tree['item'] - del parent_leaf['children'][current_item] - for item, child_leaf in test_tree['children'].items(): - parent_leaf['children'][item] = child_leaf - child_leaf['parent'] = parent_leaf + parent_leaf = test_tree["parent"] + current_item = test_tree["item"] + del parent_leaf["children"][current_item] + for item, child_leaf in test_tree["children"].items(): + parent_leaf["children"][item] = child_leaf + child_leaf["parent"] = parent_leaf self._remove_file_names(child_leaf) def _generate_names(self, test_tree: Dict[str, Any]) -> None: - if test_tree['type'] == LeafType.ROOT: - test_tree['name'] = 'root' + if test_tree["type"] == LeafType.ROOT: + test_tree["name"] = "root" - if test_tree['type'] == LeafType.DIR: - test_tree['name'] = test_tree['item'].basename + if test_tree["type"] == LeafType.DIR: + test_tree["name"] = test_tree["item"].basename - if test_tree['type'] 
in {LeafType.CODE, LeafType.FILE}:
-            item = test_tree['item']
+        if test_tree["type"] in {LeafType.CODE, LeafType.FILE}:
+            item = test_tree["item"]
             if isinstance(item, Module):
-                test_tree['name'] = os.path.split(str(item.fspath))[1]
+                test_tree["name"] = os.path.split(str(item.fspath))[1]
             else:
-                test_tree['name'] = item.name
+                test_tree["name"] = item.name

-        for item, child_leaf in test_tree['children'].items():
+        for item, child_leaf in test_tree["children"].items():
             self._generate_names(child_leaf)

     def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separator: str):
-        child_items = list(test_tree['children'].items())
-        if test_tree['type'] not in leaf_types:
+        child_items = list(test_tree["children"].items())
+        if test_tree["type"] not in leaf_types:
             for item, child_leaf in child_items:
                 self._merge_leaf_types(child_leaf, leaf_types, separator)
-        elif len(test_tree['children'].items()) > 0:
-            parent_leaf = test_tree['parent']
-            current_item = test_tree['item']
-            current_name = test_tree['name']
-            del parent_leaf['children'][current_item]
+        elif len(test_tree["children"].items()) > 0:
+            parent_leaf = test_tree["parent"]
+            current_item = test_tree["item"]
+            current_name = test_tree["name"]
+            del parent_leaf["children"][current_item]
             for item, child_leaf in child_items:
-                parent_leaf['children'][item] = child_leaf
-                child_leaf['parent'] = parent_leaf
-                child_leaf['name'] = current_name + separator + child_leaf['name']
+                parent_leaf["children"][item] = child_leaf
+                child_leaf["parent"] = parent_leaf
+                child_leaf["name"] = current_name + separator + child_leaf["name"]
                 self._merge_leaf_types(child_leaf, leaf_types, separator)

     def _merge_dirs(self, test_tree: Dict[str, Any]) -> None:
         self._merge_leaf_types(test_tree, {LeafType.DIR}, self._config.rp_dir_path_separator)

     def _merge_code(self, test_tree: Dict[str, Any]) -> None:
-        self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, '::')
+        self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, "::")

     def _build_item_paths(self, leaf: Dict[str, Any], path: List[Dict[str, Any]]) -> None:
-        if 'children' in leaf and len(leaf['children']) > 0:
+        if "children" in leaf and len(leaf["children"]) > 0:
             path.append(leaf)
-            for name, child_leaf in leaf['children'].items():
+            for name, child_leaf in leaf["children"].items():
                 self._build_item_paths(child_leaf, path)
             path.pop()
-        elif leaf['type'] != LeafType.ROOT:
-            self._tree_path[leaf['item']] = path + [leaf]
+        elif leaf["type"] != LeafType.ROOT:
+            self._tree_path[leaf["item"]] = path + [leaf]

     @check_rp_enabled
     def collect_tests(self, session: Session) -> None:
@@ -386,9 +385,12 @@ def _truncate_item_name(self, name: str) -> str:
         :return: truncated to maximum length name if needed
         """
         if len(name) > MAX_ITEM_NAME_LENGTH:
-            name = name[:MAX_ITEM_NAME_LENGTH - len(TRUNCATION_STR)] + TRUNCATION_STR
-            LOGGER.warning(PytestWarning(
-                f'Test leaf ID was truncated to "{name}" because of name size constrains on Report Portal'))
+            name = name[: MAX_ITEM_NAME_LENGTH - len(TRUNCATION_STR)] + TRUNCATION_STR
+            LOGGER.warning(
+                PytestWarning(
+                    f'Test leaf ID was truncated to "{name}" because of name size constraints on Report Portal'
+                )
+            )
         return name

     def _get_item_description(self, test_item):
@@ -413,87 +415,86 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) ->
         :param func: a function to execute
         :return: the result of the function bypassed
         """
-        if 'lock' in leaf:
-            with leaf['lock']:
+        if "lock" in leaf:
+            with leaf["lock"]:
                 return func(leaf)
return func(leaf) def _build_start_suite_rq(self, leaf): - code_ref = str(leaf['item']) if leaf['type'] == LeafType.DIR else str(leaf['item'].fspath) - parent_item_id = self._lock(leaf['parent'], lambda p: p.get('item_id')) if 'parent' in leaf else None + code_ref = str(leaf["item"]) if leaf["type"] == LeafType.DIR else str(leaf["item"].fspath) + parent_item_id = self._lock(leaf["parent"], lambda p: p.get("item_id")) if "parent" in leaf else None payload = { - 'name': self._truncate_item_name(leaf['name']), - 'description': self._get_item_description(leaf['item']), - 'start_time': timestamp(), - 'item_type': 'SUITE', - 'code_ref': code_ref, - 'parent_item_id': parent_item_id + "name": self._truncate_item_name(leaf["name"]), + "description": self._get_item_description(leaf["item"]), + "start_time": timestamp(), + "item_type": "SUITE", + "code_ref": code_ref, + "parent_item_id": parent_item_id, } return payload def _start_suite(self, suite_rq): - LOGGER.debug('ReportPortal - Start Suite: request_body=%s', suite_rq) + LOGGER.debug("ReportPortal - Start Suite: request_body=%s", suite_rq) return self.rp.start_test_item(**suite_rq) def _create_suite(self, leaf): - if leaf['exec'] != ExecStatus.CREATED: + if leaf["exec"] != ExecStatus.CREATED: return item_id = self._start_suite(self._build_start_suite_rq(leaf)) - leaf['item_id'] = item_id - leaf['exec'] = ExecStatus.IN_PROGRESS + leaf["item_id"] = item_id + leaf["exec"] = ExecStatus.IN_PROGRESS @check_rp_enabled def _create_suite_path(self, item: Item): path = self._tree_path[item] for leaf in path[1:-1]: - if leaf['exec'] != ExecStatus.CREATED: + if leaf["exec"] != ExecStatus.CREATED: continue self._lock(leaf, lambda p: self._create_suite(p)) def _get_item_name(self, mark) -> Optional[str]: - return mark.kwargs.get('name', mark.args[0] if mark.args else None) + return mark.kwargs.get("name", mark.args[0] if mark.args else None) def _get_code_ref(self, item): # Generate script path from work dir, use only backslashes to have the # same path on different systems and do not affect Test Case ID on # different systems - path = os.path.relpath(str(item.fspath), ROOT_DIR).replace('\\', '/') - method_name = item.originalname if hasattr(item, 'originalname') and item.originalname is not None \ - else item.name + path = os.path.relpath(str(item.fspath), ROOT_DIR).replace("\\", "/") + method_name = ( + item.originalname if hasattr(item, "originalname") and item.originalname is not None else item.name + ) parent = item.parent classes = [method_name] while not isinstance(parent, Module): - if not isinstance(parent, Instance) and hasattr(parent, 'name'): + if not isinstance(parent, Instance) and hasattr(parent, "name"): classes.append(parent.name) - if hasattr(parent, 'parent'): + if hasattr(parent, "parent"): parent = parent.parent else: break classes.reverse() - class_path = '.'.join(classes) - return '{0}:{1}'.format(path, class_path) + class_path = ".".join(classes) + return "{0}:{1}".format(path, class_path) def _get_test_case_id(self, mark, leaf) -> str: - parameters = leaf.get('parameters', None) + parameters = leaf.get("parameters", None) parameterized = True selected_params = None if mark is not None: - parameterized = mark.kwargs.get('parameterized', False) - selected_params = mark.kwargs.get('params', None) - if selected_params is not None and not isinstance(selected_params, - list): + parameterized = mark.kwargs.get("parameterized", False) + selected_params = mark.kwargs.get("params", None) + if selected_params is not None and not 
isinstance(selected_params, list): selected_params = [selected_params] param_str = None if parameterized and parameters is not None and len(parameters) > 0: if selected_params is not None and len(selected_params) > 0: - param_list = [str(parameters.get(param, None)) for param in - selected_params] + param_list = [str(parameters.get(param, None)) for param in selected_params] else: param_list = [str(param) for param in parameters.values()] - param_str = '[{}]'.format(','.join(sorted(param_list))) + param_str = "[{}]".format(",".join(sorted(param_list))) - basic_name_part = leaf['code_ref'] + basic_name_part = leaf["code_ref"] if mark is None: if param_str is None: return basic_name_part @@ -520,8 +521,7 @@ def _get_issue_urls(self, mark, default_url): if not issue_ids: return None mark_url = mark.kwargs.get("url", None) or default_url - return [mark_url.format(issue_id=issue_id) if mark_url else None - for issue_id in issue_ids] + return [mark_url.format(issue_id=issue_id) if mark_url else None for issue_id in issue_ids] def _get_issue_description_line(self, mark, default_url): issue_ids = self._get_issue_ids(mark) @@ -565,16 +565,20 @@ def _get_issue(self, mark) -> Issue: issue_urls = self._get_issue_urls(mark, default_url) for issue_id, issue_url in zip(issue_ids, issue_urls): issue.external_issue_add( - ExternalIssue(bts_url=self._config.rp_bts_url, bts_project=self._config.rp_bts_project, - ticket_id=issue_id, url=issue_url) + ExternalIssue( + bts_url=self._config.rp_bts_url, + bts_project=self._config.rp_bts_project, + ticket_id=issue_id, + url=issue_url, + ) ) return issue def _to_attribute(self, attribute_tuple): if attribute_tuple[0]: - return {'key': attribute_tuple[0], 'value': attribute_tuple[1]} + return {"key": attribute_tuple[0], "value": attribute_tuple[1]} else: - return {'value': attribute_tuple[1]} + return {"value": attribute_tuple[1]} def _process_item_name(self, leaf: Dict[str, Any]) -> str: """ @@ -583,9 +587,9 @@ def _process_item_name(self, leaf: Dict[str, Any]) -> str: :param leaf: item context :return: Item Name string """ - item = leaf['item'] - name = leaf['name'] - names = [m for m in item.iter_markers() if m.name == 'name'] + item = leaf["item"] + name = leaf["name"] + names = [m for m in item.iter_markers() if m.name == "name"] if len(names) > 0: mark_name = self._get_item_name(names[0]) if mark_name: @@ -599,10 +603,10 @@ def _get_parameters(self, item) -> Optional[Dict[str, Any]]: :param item: Pytest.Item :return: dict of params """ - params = item.callspec.params if hasattr(item, 'callspec') else None + params = item.callspec.params if hasattr(item, "callspec") else None if not params: return None - return {str(k): v.replace('\0', '\\0') if isinstance(v, str) else v for k, v in params.items()} + return {str(k): v.replace("\0", "\\0") if isinstance(v, str) else v for k, v in params.items()} def _process_test_case_id(self, leaf): """ @@ -611,7 +615,7 @@ def _process_test_case_id(self, leaf): :param leaf: item context :return: Test Case ID string """ - tc_ids = [m for m in leaf['item'].iter_markers() if m.name == 'tc_id'] + tc_ids = [m for m in leaf["item"].iter_markers() if m.name == "tc_id"] if len(tc_ids) > 0: return self._get_test_case_id(tc_ids[0], leaf) return self._get_test_case_id(None, leaf) @@ -623,7 +627,7 @@ def _process_issue(self, item) -> Issue: :param item: Pytest.Item :return: Issue """ - issues = [m for m in item.iter_markers() if m.name == 'issue'] + issues = [m for m in item.iter_markers() if m.name == "issue"] if len(issues) > 0: return 
self._get_issue(issues[0]) @@ -636,15 +640,14 @@ def _process_attributes(self, item): """ attributes = set() for marker in item.iter_markers(): - if marker.name == 'issue': + if marker.name == "issue": if self._config.rp_issue_id_marks: for issue_id in self._get_issue_ids(marker): attributes.add((marker.name, issue_id)) continue - if marker.name == 'name': + if marker.name == "name": continue - if marker.name in self._config.rp_ignore_attributes \ - or marker.name in PYTEST_MARKS_IGNORE: + if marker.name in self._config.rp_ignore_attributes or marker.name in PYTEST_MARKS_IGNORE: continue if len(marker.args) > 0: attributes.add((marker.name, str(marker.args[0]))) @@ -659,13 +662,13 @@ def _process_metadata_item_start(self, leaf: Dict[str, Any]) -> None: :param leaf: item context """ - item = leaf['item'] - leaf['name'] = self._process_item_name(leaf) - leaf['parameters'] = self._get_parameters(item) - leaf['code_ref'] = self._get_code_ref(item) - leaf['test_case_id'] = self._process_test_case_id(leaf) - leaf['issue'] = self._process_issue(item) - leaf['attributes'] = self._process_attributes(item) + item = leaf["item"] + leaf["name"] = self._process_item_name(leaf) + leaf["parameters"] = self._get_parameters(item) + leaf["code_ref"] = self._get_code_ref(item) + leaf["test_case_id"] = self._process_test_case_id(leaf) + leaf["issue"] = self._process_issue(item) + leaf["attributes"] = self._process_attributes(item) def _process_metadata_item_finish(self, leaf: Dict[str, Any]) -> None: """ @@ -673,30 +676,30 @@ def _process_metadata_item_finish(self, leaf: Dict[str, Any]) -> None: :param leaf: item context """ - item = leaf['item'] - leaf['attributes'] = self._process_attributes(item) - leaf['issue'] = self._process_issue(item) + item = leaf["item"] + leaf["attributes"] = self._process_attributes(item) + leaf["issue"] = self._process_issue(item) def _build_start_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: payload = { - 'attributes': leaf.get('attributes', None), - 'name': self._truncate_item_name(leaf['name']), - 'description': self._get_item_description(leaf['item']), - 'start_time': timestamp(), - 'item_type': 'STEP', - 'code_ref': leaf.get('code_ref', None), - 'parameters': leaf.get('parameters', None), - 'parent_item_id': self._lock(leaf['parent'], lambda p: p['item_id']), - 'test_case_id': leaf.get('test_case_id', None) + "attributes": leaf.get("attributes", None), + "name": self._truncate_item_name(leaf["name"]), + "description": self._get_item_description(leaf["item"]), + "start_time": timestamp(), + "item_type": "STEP", + "code_ref": leaf.get("code_ref", None), + "parameters": leaf.get("parameters", None), + "parent_item_id": self._lock(leaf["parent"], lambda p: p["item_id"]), + "test_case_id": leaf.get("test_case_id", None), } return payload def _start_step(self, step_rq): - LOGGER.debug('ReportPortal - Start TestItem: request_body=%s', step_rq) + LOGGER.debug("ReportPortal - Start TestItem: request_body=%s", step_rq) return self.rp.start_test_item(**step_rq) def __unique_id(self): - return str(os.getpid()) + '-' + str(threading.current_thread().ident) + return str(os.getpid()) + "-" + str(threading.current_thread().ident) def __started(self): return self.__unique_id() in self._start_tracker @@ -719,8 +722,8 @@ def start_pytest_item(self, test_item: Optional[Item] = None): current_leaf = self._tree_path[test_item][-1] self._process_metadata_item_start(current_leaf) item_id = self._start_step(self._build_start_step_rq(current_leaf)) - current_leaf['item_id'] = item_id - 
current_leaf['exec'] = ExecStatus.IN_PROGRESS + current_leaf["item_id"] = item_id + current_leaf["exec"] = ExecStatus.IN_PROGRESS def process_results(self, test_item, report): """ @@ -730,75 +733,74 @@ def process_results(self, test_item, report): :param report: pytest's result report """ if report.longrepr: - self.post_log(test_item, report.longreprtext, log_level='ERROR') + self.post_log(test_item, report.longreprtext, log_level="ERROR") leaf = self._tree_path[test_item][-1] # Defining test result - if report.when == 'setup': - leaf['status'] = 'PASSED' + if report.when == "setup": + leaf["status"] = "PASSED" if report.failed: - leaf['status'] = 'FAILED' + leaf["status"] = "FAILED" return if report.skipped: - if leaf['status'] in (None, 'PASSED'): - leaf['status'] = 'SKIPPED' + if leaf["status"] in (None, "PASSED"): + leaf["status"] = "SKIPPED" def _build_finish_step_rq(self, leaf): - issue = leaf.get('issue', None) - status = leaf['status'] - if status == 'SKIPPED' and not self._config.rp_is_skipped_an_issue: + issue = leaf.get("issue", None) + status = leaf["status"] + if status == "SKIPPED" and not self._config.rp_is_skipped_an_issue: issue = NOT_ISSUE - if status == 'PASSED': + if status == "PASSED": issue = None payload = { - 'attributes': leaf.get('attributes', None), - 'end_time': timestamp(), - 'status': status, - 'issue': issue, - 'item_id': leaf['item_id'] + "attributes": leaf.get("attributes", None), + "end_time": timestamp(), + "status": status, + "issue": issue, + "item_id": leaf["item_id"], } return payload def _finish_step(self, finish_rq): - LOGGER.debug('ReportPortal - Finish TestItem: request_body=%s', finish_rq) + LOGGER.debug("ReportPortal - Finish TestItem: request_body=%s", finish_rq) self.rp.finish_test_item(**finish_rq) def _finish_suite(self, finish_rq): - LOGGER.debug('ReportPortal - End TestSuite: request_body=%s', finish_rq) + LOGGER.debug("ReportPortal - End TestSuite: request_body=%s", finish_rq) self.rp.finish_test_item(**finish_rq) def _build_finish_suite_rq(self, leaf): - payload = { - 'end_time': timestamp(), - 'item_id': leaf['item_id'] - } + payload = {"end_time": timestamp(), "item_id": leaf["item_id"]} return payload def _proceed_suite_finish(self, leaf): - if leaf.get('exec', ExecStatus.FINISHED) == ExecStatus.FINISHED: + if leaf.get("exec", ExecStatus.FINISHED) == ExecStatus.FINISHED: return self._finish_suite(self._build_finish_suite_rq(leaf)) - leaf['exec'] = ExecStatus.FINISHED + leaf["exec"] = ExecStatus.FINISHED def _finish_parents(self, leaf): - if 'parent' not in leaf or leaf['parent'] is None \ - or leaf['parent']['type'] is LeafType.ROOT \ - or leaf['parent'].get('exec', ExecStatus.FINISHED) == \ - ExecStatus.FINISHED: + if ( + "parent" not in leaf + or leaf["parent"] is None + or leaf["parent"]["type"] is LeafType.ROOT + or leaf["parent"].get("exec", ExecStatus.FINISHED) == ExecStatus.FINISHED + ): return - for item, child_leaf in leaf['parent']['children'].items(): - current_status = child_leaf['exec'] + for item, child_leaf in leaf["parent"]["children"].items(): + current_status = child_leaf["exec"] if current_status != ExecStatus.FINISHED: - current_status = self._lock(child_leaf, lambda p: p['exec']) + current_status = self._lock(child_leaf, lambda p: p["exec"]) if current_status != ExecStatus.FINISHED: return - self._lock(leaf['parent'], lambda p: self._proceed_suite_finish(p)) - self._finish_parents(leaf['parent']) + self._lock(leaf["parent"], lambda p: self._proceed_suite_finish(p)) + self._finish_parents(leaf["parent"]) 
@check_rp_enabled def finish_pytest_item(self, test_item): @@ -812,12 +814,11 @@ def finish_pytest_item(self, test_item): leaf = path[-1] self._process_metadata_item_finish(leaf) self._finish_step(self._build_finish_step_rq(leaf)) - leaf['exec'] = ExecStatus.FINISHED + leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) def _get_items(self, exec_status): - return [k for k, v in self._tree_path.items() if - v[-1]['exec'] == exec_status] + return [k for k, v in self._tree_path.items() if v[-1]["exec"] == exec_status] def finish_suites(self): """ @@ -830,25 +831,24 @@ def finish_suites(self): """ # Ensure there is no running items finish_time = time() - while len(self._get_items(ExecStatus.IN_PROGRESS)) > 0 \ - and time() - finish_time <= self._config.rp_launch_timeout: + while ( + len(self._get_items(ExecStatus.IN_PROGRESS)) > 0 and time() - finish_time <= self._config.rp_launch_timeout + ): sleep(0.1) skipped_items = self._get_items(ExecStatus.CREATED) for item in skipped_items: path = list(self._tree_path[item]) path.reverse() for leaf in path[1:-1]: - if leaf['exec'] == ExecStatus.IN_PROGRESS: + if leaf["exec"] == ExecStatus.IN_PROGRESS: self._lock(leaf, lambda p: self._proceed_suite_finish(p)) def _build_finish_launch_rq(self): - finish_rq = { - 'end_time': timestamp() - } + finish_rq = {"end_time": timestamp()} return finish_rq def _finish_launch(self, finish_rq): - LOGGER.debug('ReportPortal - Finish launch: request_body=%s', finish_rq) + LOGGER.debug("ReportPortal - Finish launch: request_body=%s", finish_rq) self.rp.finish_launch(**finish_rq) @check_rp_enabled @@ -863,17 +863,17 @@ def finish_launch(self): def _build_log(self, item_id: str, message: str, log_level: str, attachment: Optional[Any] = None): sl_rq = { - 'item_id': item_id, - 'time': timestamp(), - 'message': message, - 'level': log_level, + "item_id": item_id, + "time": timestamp(), + "message": message, + "level": log_level, } if attachment: - sl_rq['attachment'] = attachment + sl_rq["attachment"] = attachment return sl_rq @check_rp_enabled - def post_log(self, test_item, message: str, log_level: str = 'INFO', attachment: Optional[Any] = None): + def post_log(self, test_item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None): """ Send a log message to the Report Portal. @@ -885,9 +885,10 @@ def post_log(self, test_item, message: str, log_level: str = 'INFO', attachment: :return: None """ if log_level not in self._log_levels: - LOGGER.warning('Incorrect loglevel = %s. Force set to INFO. ' - 'Available levels: %s.', log_level, self._log_levels) - item_id = self._tree_path[test_item][-1]['item_id'] + LOGGER.warning( + "Incorrect loglevel = %s. Force set to INFO. 
" "Available levels: %s.", log_level, self._log_levels + ) + item_id = self._tree_path[test_item][-1]["item_id"] sl_rq = self._build_log(item_id, message, log_level, attachment) self.rp.log(**sl_rq) @@ -908,28 +909,27 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None try: outcome = yield exception = outcome.exception - status = 'PASSED' + status = "PASSED" if exception: - if type(exception).__name__ != 'Skipped': - status = 'FAILED' - self.post_log(name, error_msg, log_level='ERROR') + if type(exception).__name__ != "Skipped": + status = "FAILED" + self.post_log(name, error_msg, log_level="ERROR") reporter.finish_nested_step(item_id, timestamp(), status) except Exception as e: - LOGGER.error('Failed to report fixture: %s', name) + LOGGER.error("Failed to report fixture: %s", name) LOGGER.exception(e) - reporter.finish_nested_step(item_id, timestamp(), 'FAILED') + reporter.finish_nested_step(item_id, timestamp(), "FAILED") def start(self) -> None: """Start servicing Report Portal requests.""" self.parent_item_id = self._config.rp_parent_item_id - self.ignored_attributes = list( - set( - self._config.rp_ignore_attributes or [] - ).union({'parametrize'}) + self.ignored_attributes = list(set(self._config.rp_ignore_attributes or []).union({"parametrize"})) + LOGGER.debug( + "ReportPortal - Init service: endpoint=%s, " "project=%s, api_key=%s", + self._config.rp_endpoint, + self._config.rp_project, + self._config.rp_api_key, ) - LOGGER.debug('ReportPortal - Init service: endpoint=%s, ' - 'project=%s, api_key=%s', self._config.rp_endpoint, - self._config.rp_project, self._config.rp_api_key) launch_id = self._launch_id if self._config.rp_launch_id: launch_id = self._config.rp_launch_id @@ -947,7 +947,7 @@ def start(self) -> None: launch_uuid_print=self._config.rp_launch_uuid_print, print_output=self._config.rp_launch_uuid_print_output, http_timeout=self._config.rp_http_timeout, - mode=self._config.rp_mode + mode=self._config.rp_mode, ) if hasattr(self.rp, "get_project_settings"): self.project_settings = self.rp.get_project_settings() diff --git a/setup.py b/setup.py index 3f33715..f5758a9 100644 --- a/setup.py +++ b/setup.py @@ -17,8 +17,7 @@ from setuptools import setup - -__version__ = '5.5.0' +__version__ = "5.5.0" def read_file(fname): @@ -32,31 +31,31 @@ def read_file(fname): setup( - name='pytest-reportportal', + name="pytest-reportportal", version=__version__, - description='Agent for Reporting results of tests to the Report Portal', - long_description=read_file('README.md'), - long_description_content_type='text/markdown', - author='Report Portal Team', - author_email='support@reportportal.io', - url='https://github.com/reportportal/agent-python-pytest', - packages=['pytest_reportportal'], - package_data={'pytest_reportportal': ['*.pyi']}, - install_requires=read_file('requirements.txt').splitlines(), - license='Apache 2.0', - keywords=['testing', 'reporting', 'reportportal', 'pytest', 'agent'], + description="Agent for Reporting results of tests to the Report Portal", + long_description=read_file("README.md"), + long_description_content_type="text/markdown", + author="Report Portal Team", + author_email="support@reportportal.io", + url="https://github.com/reportportal/agent-python-pytest", + packages=["pytest_reportportal"], + package_data={"pytest_reportportal": ["*.pyi"]}, + install_requires=read_file("requirements.txt").splitlines(), + license="Apache 2.0", + keywords=["testing", "reporting", "reportportal", "pytest", "agent"], classifiers=[ - 
'Framework :: Pytest', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Programming Language :: Python :: 3.12', - 'Programming Language :: Python :: 3.13' - ], + "Framework :: Pytest", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + ], entry_points={ - 'pytest11': [ - 'pytest_reportportal = pytest_reportportal.plugin', + "pytest11": [ + "pytest_reportportal = pytest_reportportal.plugin", ] - } + }, ) diff --git a/tests/__init__.py b/tests/__init__.py index f7f99a7..5ada03e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -13,5 +13,5 @@ """This package contains tests for the project.""" -REPORT_PORTAL_SERVICE = 'reportportal_client.RPClient' -REQUESTS_SERVICE = 'reportportal_client.client.requests.Session' +REPORT_PORTAL_SERVICE = "reportportal_client.RPClient" +REQUESTS_SERVICE = "reportportal_client.client.requests.Session" diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py index 5046575..babb799 100644 --- a/tests/helpers/utils.py +++ b/tests/helpers/utils.py @@ -21,67 +21,67 @@ import pytest DEFAULT_VARIABLES = { - 'rp_launch': 'Pytest', - 'rp_endpoint': 'http://localhost:8080', - 'rp_project': 'default_personal', - 'rp_api_key': 'test_api_key', - 'rp_skip_connection_test': 'True' + "rp_launch": "Pytest", + "rp_endpoint": "http://localhost:8080", + "rp_project": "default_personal", + "rp_api_key": "test_api_key", + "rp_skip_connection_test": "True", } DEFAULT_PROJECT_SETTINGS = { - 'project': 2, - 'subTypes': { - 'NO_DEFECT': [ + "project": 2, + "subTypes": { + "NO_DEFECT": [ { - 'id': 4, - 'locator': 'nd001', - 'typeRef': 'NO_DEFECT', - 'longName': 'No Defect', - 'shortName': 'ND', - 'color': "#777777" + "id": 4, + "locator": "nd001", + "typeRef": "NO_DEFECT", + "longName": "No Defect", + "shortName": "ND", + "color": "#777777", } ], - 'TO_INVESTIGATE': [ + "TO_INVESTIGATE": [ { - 'id': 1, - 'locator': 'ti001', - 'typeRef': 'TO_INVESTIGATE', - 'longName': 'To Investigate', - 'shortName': 'TI', - 'color': '#ffb743' + "id": 1, + "locator": "ti001", + "typeRef": "TO_INVESTIGATE", + "longName": "To Investigate", + "shortName": "TI", + "color": "#ffb743", } ], - 'AUTOMATION_BUG': [ + "AUTOMATION_BUG": [ { - 'id': 2, - 'locator': 'ab001', - 'typeRef': 'AUTOMATION_BUG', - 'longName': 'Automation Bug', - 'shortName': 'AB', - 'color': '#f7d63e' + "id": 2, + "locator": "ab001", + "typeRef": "AUTOMATION_BUG", + "longName": "Automation Bug", + "shortName": "AB", + "color": "#f7d63e", } ], - 'PRODUCT_BUG': [ + "PRODUCT_BUG": [ { - 'id': 3, - 'locator': 'pb001', - 'typeRef': 'PRODUCT_BUG', - 'longName': 'Product Bug', - 'shortName': 'PB', - 'color': '#ec3900' + "id": 3, + "locator": "pb001", + "typeRef": "PRODUCT_BUG", + "longName": "Product Bug", + "shortName": "PB", + "color": "#ec3900", } ], - 'SYSTEM_ISSUE': [ + "SYSTEM_ISSUE": [ { - 'id': 5, - 'locator': 'si001', - 'typeRef': 'SYSTEM_ISSUE', - 'longName': 'System Issue', - 'shortName': 'SI', - 'color': '#0274d1' + "id": 5, + "locator": "si001", + "typeRef": "SYSTEM_ISSUE", + "longName": "System Issue", + "shortName": "SI", + "color": "#0274d1", } - ] - } + ], + }, } @@ -98,27 +98,26 @@ def run_pytest_tests(tests, args=None, variables=None): if variables is None: variables = 
DEFAULT_VARIABLES - arguments = ['--reportportal'] + args + arguments = ["--reportportal"] + args for k, v in variables.items(): - arguments.append('-o') - arguments.append('{0}={1}'.format(k, str(v))) + arguments.append("-o") + arguments.append("{0}={1}".format(k, str(v))) if tests is not None: for t in tests: arguments.append(t) # Workaround collisions with parent test - current_test = os.environ['PYTEST_CURRENT_TEST'] - del os.environ['PYTEST_CURRENT_TEST'] + current_test = os.environ["PYTEST_CURRENT_TEST"] + del os.environ["PYTEST_CURRENT_TEST"] result = pytest.main(arguments) - os.environ['PYTEST_CURRENT_TEST'] = current_test + os.environ["PYTEST_CURRENT_TEST"] = current_test return result def item_id_gen(**kwargs): - return "{}-{}-{}".format(kwargs['name'], str(round(time.time() * 1000)), - random.randint(0, 9999)) + return "{}-{}-{}".format(kwargs["name"], str(round(time.time() * 1000)), random.randint(0, 9999)) def project_settings(**kwargs): @@ -128,10 +127,10 @@ def project_settings(**kwargs): def attributes_to_tuples(attributes): result = set() for attribute in attributes: - if 'key' in attribute: - result.add((attribute['key'], attribute['value'])) + if "key" in attribute: + result.add((attribute["key"], attribute["value"])) else: - result.add((None, attribute['value'])) + result.add((None, attribute["value"])) return result @@ -139,6 +138,7 @@ def attributes_to_tuples(attributes): def run_tests_with_client(client, tests, args=None, variables=None): def test_func(): from reportportal_client import set_current + set_current(client) return run_pytest_tests(tests, args, variables) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e062897..5895d96 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -15,175 +15,257 @@ from tests.helpers import utils -HIERARCHY_TESTS = \ - [ - ['examples/test_simple.py'], - ['examples/hierarchy/inner/test_inner_simple.py'], - ['examples/hierarchy/test_in_class.py'], - ['examples/hierarchy/test_in_class_in_class.py'], - ['examples/hierarchy/another_inner/test_another_inner_simple.py', - 'examples/hierarchy/inner/test_inner_simple.py'] - ] + \ - [['examples/params/test_in_class_parameterized.py']] * 5 + \ - [['examples/hierarchy/inner/test_inner_simple.py']] * 7 + \ - [['examples/hierarchy/test_in_class_in_class.py']] + \ - [['examples/test_simple.py']] * 2 +HIERARCHY_TESTS = ( + [ + ["examples/test_simple.py"], + ["examples/hierarchy/inner/test_inner_simple.py"], + ["examples/hierarchy/test_in_class.py"], + ["examples/hierarchy/test_in_class_in_class.py"], + [ + "examples/hierarchy/another_inner/test_another_inner_simple.py", + "examples/hierarchy/inner/test_inner_simple.py", + ], + ] + + [["examples/params/test_in_class_parameterized.py"]] * 5 + + [["examples/hierarchy/inner/test_inner_simple.py"]] * 7 + + [["examples/hierarchy/test_in_class_in_class.py"]] + + [["examples/test_simple.py"]] * 2 +) # noinspection PyTypeChecker -HIERARCHY_TEST_VARIABLES = \ - [dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES)] * 6 + \ - [ - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': 1}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': 2}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 'rp_hierarchy_dirs_level': 999}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs': True, 'rp_hierarchy_code': True, 
'rp_hierarchy_dirs_level': -1}, - **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dir_path_separator': '/', 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dir_path_separator': '\\', 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': 1, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': 2, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': 999, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_dirs_level': -1, 'rp_hierarchy_code': True}, **utils.DEFAULT_VARIABLES), - dict(**utils.DEFAULT_VARIABLES), - dict(**utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_test_file': False}, **utils.DEFAULT_VARIABLES), - dict({'rp_hierarchy_test_file': False, 'rp_hierarchy_dirs_level': 1}, **utils.DEFAULT_VARIABLES) - ] +HIERARCHY_TEST_VARIABLES = [ + dict({"rp_hierarchy_dirs": True, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES) +] * 6 + [ + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": 1}, **utils.DEFAULT_VARIABLES + ), + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": 2}, **utils.DEFAULT_VARIABLES + ), + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": 999}, + **utils.DEFAULT_VARIABLES, + ), + dict( + {"rp_hierarchy_dirs": True, "rp_hierarchy_code": True, "rp_hierarchy_dirs_level": -1}, + **utils.DEFAULT_VARIABLES, + ), + dict({"rp_hierarchy_dir_path_separator": "/", "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dir_path_separator": "\\", "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": 1, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": 2, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": 999, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_dirs_level": -1, "rp_hierarchy_code": True}, **utils.DEFAULT_VARIABLES), + dict(**utils.DEFAULT_VARIABLES), + dict(**utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_test_file": False}, **utils.DEFAULT_VARIABLES), + dict({"rp_hierarchy_test_file": False, "rp_hierarchy_dirs_level": 1}, **utils.DEFAULT_VARIABLES), +] HIERARCHY_TEST_EXPECTED_ITEMS = [ [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_simple.py')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "test_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_simple.py")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'inner', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('inner')}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_inner_simple.py')} + {"name": "examples", "item_type": 
"SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "inner", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + {"name": "test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("inner")}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_inner_simple.py")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'Tests', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('test_in_class.py')}, - {'name': 'test_in_class', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "test_in_class.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + {"name": "Tests", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("test_in_class.py")}, + {"name": "test_in_class", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("Tests")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class_in_class.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_in_class.py')}, - {'name': 'Test', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('Tests')}, - {'name': 'test_in_class_in_class', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('Test')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + { + "name": "test_in_class_in_class.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("hierarchy"), + }, + {"name": "Tests", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("test_in_class_in_class.py")}, + {"name": "Test", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("Tests")}, + {"name": "test_in_class_in_class", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("Test")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'hierarchy', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'another_inner', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'test_another_inner_simple.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('another_inner')}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('test_another_inner_simple.py')}, - {'name': 'inner', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('hierarchy')}, - {'name': 'test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('inner')}, - {'name': 'test_simple', 'item_type': 'STEP', 
'parent_item_id': lambda x: x.startswith('test_inner_simple.py')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "hierarchy", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + {"name": "another_inner", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + { + "name": "test_another_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("another_inner"), + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("test_another_inner_simple.py"), + }, + {"name": "inner", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("hierarchy")}, + {"name": "test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("inner")}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_inner_simple.py")}, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'params', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('params')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "params", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + { + "name": "test_in_class_parameterized.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("params"), + }, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'params', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('params')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "params", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "test_in_class_parameterized.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("params"), + }, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "test_in_class_parameterized.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: 
x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "test_in_class_parameterized.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'examples', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'params', 'item_type': 'SUITE', 'parent_item_id': lambda x: x.startswith('examples')}, - {'name': 'test_in_class_parameterized.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('params')}, - {'name': 'Tests', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x.startswith('test_in_class_parameterized.py')}, - {'name': 'test_in_class_parameterized[param]', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('Tests')} + {"name": "examples", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "params", "item_type": "SUITE", "parent_item_id": lambda x: x.startswith("examples")}, + { + "name": "test_in_class_parameterized.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("params"), + }, + { + "name": "Tests", + "item_type": "SUITE", + "parent_item_id": lambda x: x.startswith("test_in_class_parameterized.py"), + }, + { + "name": "test_in_class_parameterized[param]", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("Tests"), + }, ], [ - {'name': 'examples/hierarchy/inner/test_inner_simple.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('examples/hierarchy/inner/test_inner_simple.py')} + { + "name": "examples/hierarchy/inner/test_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x is None, + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("examples/hierarchy/inner/test_inner_simple.py"), + }, ], [ - {'name': 'examples\\hierarchy\\inner\\test_inner_simple.py', 'item_type': 'SUITE', - 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('examples\\hierarchy\\inner\\test_inner_simple.py')} + { + "name": "examples\\hierarchy\\inner\\test_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x is None, + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("examples\\hierarchy\\inner\\test_inner_simple.py"), + }, ], [ - {'name': 'hierarchy/inner/test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('hierarchy/inner/test_inner_simple.py')} + {"name": "hierarchy/inner/test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "test_simple", + 
"item_type": "STEP", + "parent_item_id": lambda x: x.startswith("hierarchy/inner/test_inner_simple.py"), + }, ], [ - {'name': 'inner/test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('inner/test_inner_simple.py')} + {"name": "inner/test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("inner/test_inner_simple.py"), + }, ], [ - {'name': 'test_inner_simple.py', 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x.startswith('test_inner_simple.py')} + {"name": "test_inner_simple.py", "item_type": "SUITE", "parent_item_id": lambda x: x is None}, + {"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x.startswith("test_inner_simple.py")}, ], [ - {'name': 'examples/hierarchy/inner/test_inner_simple.py', - 'item_type': 'SUITE', 'parent_item_id': lambda x: x is None}, - {'name': 'test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x.startswith('examples/hierarchy/inner/test_inner_simple.py')} + { + "name": "examples/hierarchy/inner/test_inner_simple.py", + "item_type": "SUITE", + "parent_item_id": lambda x: x is None, + }, + { + "name": "test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x.startswith("examples/hierarchy/inner/test_inner_simple.py"), + }, ], [ - {'name': 'examples/hierarchy/inner/test_inner_simple.py::test_simple', 'item_type': 'STEP', - 'parent_item_id': lambda x: x is None} + { + "name": "examples/hierarchy/inner/test_inner_simple.py::test_simple", + "item_type": "STEP", + "parent_item_id": lambda x: x is None, + } ], [ - {'name': 'examples/hierarchy/test_in_class_in_class.py::Tests::Test::test_in_class_in_class', - 'item_type': 'STEP', 'parent_item_id': lambda x: x is None} + { + "name": "examples/hierarchy/test_in_class_in_class.py::Tests::Test::test_in_class_in_class", + "item_type": "STEP", + "parent_item_id": lambda x: x is None, + } ], - [ - {'name': 'examples/test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x is None} - ], - [ - {'name': 'test_simple', 'item_type': 'STEP', 'parent_item_id': lambda x: x is None} - ] + [{"name": "examples/test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], + [{"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], ] HIERARCHY_TEST_PARAMETERS = [ diff --git a/tests/integration/test_attributes.py b/tests/integration/test_attributes.py index 2c6c4a5..d8fb81a 100644 --- a/tests/integration/test_attributes.py +++ b/tests/integration/test_attributes.py @@ -25,17 +25,17 @@ def test_custom_attribute_report(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = {'markers': 'scope: to which test scope a test relates'} + variables = {"markers": "scope: to which test scope a test relates"} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/attributes/test_one_attribute.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/attributes/test_one_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" 
called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['attributes'] == [{'key': 'scope', 'value': 'smoke'}] + assert step_call_args["attributes"] == [{"key": "scope", "value": "smoke"}] @mock.patch(REPORT_PORTAL_SERVICE) @@ -44,20 +44,17 @@ def test_custom_attribute_not_reported_if_skip_configured(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = { - 'markers': 'scope: to which test scope a test relates', - 'rp_ignore_attributes': 'scope' - } + variables = {"markers": "scope: to which test scope a test relates", "rp_ignore_attributes": "scope"} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/attributes/test_one_attribute.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/attributes/test_one_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['attributes'] == [] + assert step_call_args["attributes"] == [] @mock.patch(REPORT_PORTAL_SERVICE) @@ -66,23 +63,21 @@ def test_two_attributes_different_values_report(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = {'markers': 'scope: to which test scope a test relates'} + variables = {"markers": "scope: to which test scope a test relates"} variables.update(utils.DEFAULT_VARIABLES.items()) result = utils.run_pytest_tests( - tests=['examples/attributes/test_two_attributes_with_same_key.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + tests=["examples/attributes/test_two_attributes_with_same_key.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - actual_attributes = step_call_args['attributes'] + actual_attributes = step_call_args["attributes"] - assert utils.attributes_to_tuples(actual_attributes) == { - ('scope', 'smoke'), - ('scope', 'regression') - } + assert utils.attributes_to_tuples(actual_attributes) == {("scope", "smoke"), ("scope", "regression")} @mock.patch(REPORT_PORTAL_SERVICE) @@ -91,19 +86,17 @@ def test_skip_attribute(mock_client_init): :param mock_client_init: Pytest fixture """ - result = utils.run_pytest_tests(tests=['examples/skip/test_simple_skip.py']) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/skip/test_simple_skip.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - actual_attributes = step_call_args['attributes'] + actual_attributes = step_call_args["attributes"] - assert utils.attributes_to_tuples(actual_attributes) == { - (None, 'skip') - } + assert utils.attributes_to_tuples(actual_attributes) == 
{(None, "skip")} @mock.patch(REPORT_PORTAL_SERVICE) @@ -112,10 +105,10 @@ def test_custom_runtime_attribute_report(mock_client_init): :param mock_client_init: Pytest fixture """ - variables = {'markers': 'scope: to which test scope a test relates\nruntime: runtime attribute mark'} + variables = {"markers": "scope: to which test scope a test relates\nruntime: runtime attribute mark"} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/attributes/test_runtime_attribute.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/attributes/test_runtime_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' @@ -123,14 +116,11 @@ def test_custom_runtime_attribute_report(mock_client_init): start_call_args = mock_client.start_test_item.call_args_list start_step_call_args = start_call_args[-1][1] - assert start_step_call_args['attributes'] == [ - {'key': 'scope', 'value': 'smoke'} - ] + assert start_step_call_args["attributes"] == [{"key": "scope", "value": "smoke"}] finish_call_args = mock_client.finish_test_item.call_args_list finish_step_call_args = finish_call_args[-1][1] - actual_attributes = finish_step_call_args['attributes'] - attribute_tuple_list = [(kv.get('key'), kv['value']) - for kv in actual_attributes] + actual_attributes = finish_step_call_args["attributes"] + attribute_tuple_list = [(kv.get("key"), kv["value"]) for kv in actual_attributes] - assert set(attribute_tuple_list) == {('scope', 'smoke'), (None, 'runtime')} + assert set(attribute_tuple_list) == {("scope", "smoke"), (None, "runtime")} diff --git a/tests/integration/test_case_id_report.py b/tests/integration/test_case_id_report.py index 345294e..b1ddffd 100644 --- a/tests/integration/test_case_id_report.py +++ b/tests/integration/test_case_id_report.py @@ -13,33 +13,50 @@ """This module includes integration tests for Test Case ID report.""" -import pytest from unittest import mock -from examples.test_case_id import test_case_id_decorator, \ - test_case_id_decorator_params_false, test_case_id_decorator_params_no, \ - test_case_id_decorator_params_partially, test_case_id_decorator_params_true +import pytest + +from examples.test_case_id import ( + test_case_id_decorator, + test_case_id_decorator_params_false, + test_case_id_decorator_params_no, + test_case_id_decorator_params_partially, + test_case_id_decorator_params_true, +) from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize(['test', 'expected_id'], [ - ('examples/test_simple.py', 'examples/test_simple.py:test_simple'), - ('examples/params/test_in_class_parameterized.py', - 'examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized[param]'), - ('examples/test_case_id/test_case_id_decorator.py', test_case_id_decorator.TEST_CASE_ID), - ('examples/test_case_id/test_case_id_decorator_params_false.py', test_case_id_decorator_params_false.TEST_CASE_ID), - ('examples/test_case_id/test_case_id_decorator_params_no.py', test_case_id_decorator_params_no.TEST_CASE_ID), - ('examples/test_case_id/test_case_id_decorator_params_partially.py', - test_case_id_decorator_params_partially.TEST_CASE_ID + '[value1]'), - 
('examples/test_case_id/test_case_id_decorator_params_true.py', - test_case_id_decorator_params_true.TEST_CASE_ID + '[value1,value2]'), - ('examples/test_case_id/test_case_id_decorator_no_id.py', ''), - ('examples/test_case_id/test_case_id_decorator_no_id_params_false.py', ''), - ('examples/test_case_id/test_case_id_decorator_no_id_params_true.py', '[value1,value2]'), - ('examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py', '[value2]') -]) +@pytest.mark.parametrize( + ["test", "expected_id"], + [ + ("examples/test_simple.py", "examples/test_simple.py:test_simple"), + ( + "examples/params/test_in_class_parameterized.py", + "examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized[param]", + ), + ("examples/test_case_id/test_case_id_decorator.py", test_case_id_decorator.TEST_CASE_ID), + ( + "examples/test_case_id/test_case_id_decorator_params_false.py", + test_case_id_decorator_params_false.TEST_CASE_ID, + ), + ("examples/test_case_id/test_case_id_decorator_params_no.py", test_case_id_decorator_params_no.TEST_CASE_ID), + ( + "examples/test_case_id/test_case_id_decorator_params_partially.py", + test_case_id_decorator_params_partially.TEST_CASE_ID + "[value1]", + ), + ( + "examples/test_case_id/test_case_id_decorator_params_true.py", + test_case_id_decorator_params_true.TEST_CASE_ID + "[value1,value2]", + ), + ("examples/test_case_id/test_case_id_decorator_no_id.py", ""), + ("examples/test_case_id/test_case_id_decorator_no_id_params_false.py", ""), + ("examples/test_case_id/test_case_id_decorator_no_id_params_true.py", "[value1,value2]"), + ("examples/test_case_id/test_case_id_decorator_no_id_partial_params_true.py", "[value2]"), + ], +) def test_parameters(mock_client_init, test, expected_id): """Verify different tests have correct Test Case IDs. 
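
The expected IDs in the parametrize list above all follow one composition rule: take the explicit `test_case_id` (or the `file.py:Class.test` fallback) and, when parameter reporting is on, append the selected parameter values in brackets. A rough sketch of that rule, with a hypothetical helper and argument names rather than the agent's actual function:

```python
from typing import Mapping, Optional, Sequence


def compose_test_case_id(
    base_id: str,
    params: Optional[Mapping[str, str]] = None,
    include: Optional[Sequence[str]] = None,
) -> str:
    """Append a '[v1,v2]' suffix to the base ID for the selected parameters."""
    if not params or include == []:
        return base_id
    names = include if include is not None else list(params)
    values = ",".join(params[name] for name in names)
    return f"{base_id}[{values}]"


# Mirrors the expectations above: all, some, or none of the parameters reported.
assert compose_test_case_id("ID", {"p1": "value1", "p2": "value2"}) == "ID[value1,value2]"
assert compose_test_case_id("ID", {"p1": "value1", "p2": "value2"}, include=["p1"]) == "ID[value1]"
assert compose_test_case_id("ID", {"p1": "value1"}, include=[]) == "ID"
```
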
@@ -48,11 +65,11 @@ def test_parameters(mock_client_init, test, expected_id): :param expected_id: an expected Test Case ID """ result = utils.run_pytest_tests(tests=[test]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['test_case_id'] == expected_id + assert step_call_args["test_case_id"] == expected_id diff --git a/tests/integration/test_code_reference.py b/tests/integration/test_code_reference.py index 4a3af89..b197e25 100644 --- a/tests/integration/test_code_reference.py +++ b/tests/integration/test_code_reference.py @@ -13,22 +13,30 @@ """This module includes integration tests for code references generation.""" -import pytest from unittest import mock +import pytest + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @mock.patch(REPORT_PORTAL_SERVICE) -@pytest.mark.parametrize(['test', 'code_ref'], [ - ('examples/test_simple.py', 'examples/test_simple.py:test_simple'), - ('examples/params/test_in_class_parameterized.py', - 'examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized'), - ('examples/hierarchy/test_in_class.py', 'examples/hierarchy/test_in_class.py:Tests.test_in_class'), - ('examples/hierarchy/test_in_class_in_class.py', - 'examples/hierarchy/test_in_class_in_class.py:Tests.Test.test_in_class_in_class') -]) +@pytest.mark.parametrize( + ["test", "code_ref"], + [ + ("examples/test_simple.py", "examples/test_simple.py:test_simple"), + ( + "examples/params/test_in_class_parameterized.py", + "examples/params/test_in_class_parameterized.py:Tests.test_in_class_parameterized", + ), + ("examples/hierarchy/test_in_class.py", "examples/hierarchy/test_in_class.py:Tests.test_in_class"), + ( + "examples/hierarchy/test_in_class_in_class.py", + "examples/hierarchy/test_in_class_in_class.py:Tests.Test.test_in_class_in_class", + ), + ], +) def test_code_reference(mock_client_init, test, code_ref): """Verify different tests have correct code reference. 
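
All `code_ref` values verified here share the shape `<relative file path>:<dotted class path>.<test name>`. As a quick illustration (a sketch under that assumption, not the plugin's implementation), such a reference can be derived from a pytest-style location like this:

```python
def make_code_ref(file_path: str, qualname: str) -> str:
    """Build a '<relative path>:<Class.method>' style code reference."""
    # pytest separates nested classes with '::' (e.g. 'Tests::Test::test_x');
    # the references checked above join the same parts with dots instead.
    return f"{file_path}:{qualname.replace('::', '.')}"


assert make_code_ref(
    "examples/hierarchy/test_in_class_in_class.py", "Tests::Test::test_in_class_in_class"
) == "examples/hierarchy/test_in_class_in_class.py:Tests.Test.test_in_class_in_class"
```
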
@@ -37,11 +45,11 @@ def test_code_reference(mock_client_init, test, code_ref): :param code_ref: an expected code reference value """ result = utils.run_pytest_tests(tests=[test]) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' call_args = mock_client.start_test_item.call_args_list step_call_args = call_args[-1][1] - assert step_call_args['code_ref'] == code_ref + assert step_call_args["code_ref"] == code_ref diff --git a/tests/integration/test_config_handling.py b/tests/integration/test_config_handling.py index 1ae30e8..fe27aa8 100644 --- a/tests/integration/test_config_handling.py +++ b/tests/integration/test_config_handling.py @@ -17,14 +17,14 @@ from unittest import mock import pytest -from delayed_assert import expect, assert_expectations +from delayed_assert import assert_expectations, expect from reportportal_client import OutputType from examples.test_rp_logging import LOG_MESSAGE from tests import REPORT_PORTAL_SERVICE, REQUESTS_SERVICE from tests.helpers import utils -TEST_LAUNCH_ID = 'test_launch_id' +TEST_LAUNCH_ID = "test_launch_id" @mock.patch(REQUESTS_SERVICE) @@ -34,16 +34,16 @@ def test_rp_launch_id(mock_requests_init): :param mock_requests_init: mocked requests lib """ variables = dict() - variables['rp_launch_id'] = TEST_LAUNCH_ID + variables["rp_launch_id"] = TEST_LAUNCH_ID variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_requests = mock_requests_init.return_value assert mock_requests.post.call_count == 1 item_start = mock_requests.post.call_args_list[0] - assert item_start[0][0].endswith('/item') - assert item_start[1]['json']['launchUuid'] == TEST_LAUNCH_ID + assert item_start[0][0].endswith("/item") + assert item_start[1]["json"]["launchUuid"] == TEST_LAUNCH_ID @mock.patch(REPORT_PORTAL_SERVICE) @@ -54,17 +54,15 @@ def test_rp_parent_item_id(mock_client_init): """ parent_id = "parent_id" variables = dict() - variables['rp_parent_item_id'] = parent_id + variables["rp_parent_item_id"] = parent_id variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value - expect(mock_client.start_launch.call_count == 1, - '"start_launch" method was not called') - expect(mock_client.finish_launch.call_count == 1, - '"finish_launch" method was not called') + expect(mock_client.start_launch.call_count == 1, '"start_launch" method was not called') + expect(mock_client.finish_launch.call_count == 1, '"finish_launch" method was not called') start_call_args = mock_client.start_test_item.call_args_list finish_call_args = mock_client.finish_test_item.call_args_list @@ -82,139 +80,128 @@ def test_rp_parent_item_id_and_rp_launch_id(mock_requests_init): """ parent_id = "parent_id" variables = dict() - variables['rp_parent_item_id'] = 
parent_id - variables['rp_launch_id'] = TEST_LAUNCH_ID + variables["rp_parent_item_id"] = parent_id + variables["rp_launch_id"] = TEST_LAUNCH_ID variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" mock_requests = mock_requests_init.return_value assert mock_requests.post.call_count == 1 item_start = mock_requests.post.call_args_list[0] - assert item_start[0][0].endswith(f'/item/{parent_id}') - assert item_start[1]['json']['launchUuid'] == TEST_LAUNCH_ID + assert item_start[0][0].endswith(f"/item/{parent_id}") + assert item_start[1]["json"]["launchUuid"] == TEST_LAUNCH_ID @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_log_format(mock_client_init): - log_format = '(%(name)s) %(message)s (%(filename)s:%(lineno)s)' - variables = {'rp_log_format': log_format} + log_format = "(%(name)s) %(message)s (%(filename)s:%(lineno)s)" + variables = {"rp_log_format": log_format} variables.update(utils.DEFAULT_VARIABLES.items()) mock_client = mock_client_init.return_value - result = utils.run_tests_with_client( - mock_client, ['examples/test_rp_logging.py'], variables=variables) + result = utils.run_tests_with_client(mock_client, ["examples/test_rp_logging.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client.log.call_count == 1) message = mock_client.log.call_args_list[0][0][1] expect(len(message) > 0) - expect(message == f'(test_rp_logging) {LOG_MESSAGE} (test_rp_logging.py:24)') + expect(message == f"(test_rp_logging) {LOG_MESSAGE} (test_rp_logging.py:24)") assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_log_batch_payload_size(mock_client_init): log_size = 123456 - variables = {'rp_log_batch_payload_size': log_size} + variables = {"rp_log_batch_payload_size": log_size} variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['log_batch_payload_size'] == log_size) + expect(constructor_args["log_batch_payload_size"] == log_size) assert_expectations() def filter_agent_call(warn): - category = getattr(warn, 'category', None) + category = getattr(warn, "category", None) if category: - return category.__name__ == 'DeprecationWarning' or category.__name__ == 'RuntimeWarning' + return category.__name__ == "DeprecationWarning" or category.__name__ == "RuntimeWarning" return False def filter_agent_calls(warning_list): - return list( - filter( - lambda call: filter_agent_call(call), - warning_list - ) - ) + return list(filter(lambda call: filter_agent_call(call), warning_list)) @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_api_key(mock_client_init): - api_key = 'rp_api_key' + api_key = "rp_api_key" variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_key': api_key}.items()) + variables.update({"rp_api_key": api_key}.items()) with 
warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['api_key'] == api_key) + expect(constructor_args["api_key"] == api_key) expect(len(filter_agent_calls(w)) == 0) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_uuid(mock_client_init): - api_key = 'rp_api_key' + api_key = "rp_api_key" variables = dict(utils.DEFAULT_VARIABLES) - del variables['rp_api_key'] - variables.update({'rp_uuid': api_key}.items()) + del variables["rp_api_key"] + variables.update({"rp_uuid": api_key}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['api_key'] == api_key) + expect(constructor_args["api_key"] == api_key) expect(len(filter_agent_calls(w)) == 1) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_api_key_priority(mock_client_init): - api_key = 'rp_api_key' + api_key = "rp_api_key" variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_key': api_key, 'rp_uuid': 'rp_uuid'}.items()) + variables.update({"rp_api_key": api_key, "rp_uuid": "rp_uuid"}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['api_key'] == api_key) + expect(constructor_args["api_key"] == api_key) expect(len(filter_agent_calls(w)) == 0) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_api_key_empty(mock_client_init): - api_key = '' + api_key = "" variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_key': api_key}.items()) + variables.update({"rp_api_key": api_key}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 0) expect(len(filter_agent_calls(w)) == 1) @@ -225,16 +212,16 @@ def test_rp_api_key_empty(mock_client_init): def test_rp_api_retries(mock_client_init): retries = 5 variables = dict(utils.DEFAULT_VARIABLES) - variables.update({'rp_api_retries': str(retries)}.items()) + variables.update({"rp_api_retries": str(retries)}.items()) with warnings.catch_warnings(record=True) as w: - result = 
utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['retries'] == retries) + expect(constructor_args["retries"] == retries) expect(len(filter_agent_calls(w)) == 0) assert_expectations() @@ -243,30 +230,29 @@ def test_rp_api_retries(mock_client_init): def test_retries(mock_client_init): retries = 5 variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'retries': str(retries)}.items()) + variables.update({"retries": str(retries)}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], - variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) constructor_args = mock_client_init.call_args_list[0][1] - expect(constructor_args['retries'] == retries) + expect(constructor_args["retries"] == retries) expect(len(filter_agent_calls(w)) == 1) assert_expectations() @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_issue_system_url_warning(mock_client_init): - url = 'https://bugzilla.some.com/show_bug.cgi?id={issue_id}' + url = "https://bugzilla.some.com/show_bug.cgi?id={issue_id}" variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_issue_system_url': str(url)}.items()) + variables.update({"rp_issue_system_url": str(url)}.items()) with warnings.catch_warnings(record=True) as w: - result = utils.run_pytest_tests(['examples/test_issue_id.py'], variables=variables) - assert int(result) == 1, 'Exit code should be 1 (test failure)' + result = utils.run_pytest_tests(["examples/test_issue_id.py"], variables=variables) + assert int(result) == 1, "Exit code should be 1 (test failure)" expect(mock_client_init.call_count == 1) expect(len(filter_agent_calls(w)) == 1) @@ -277,12 +263,12 @@ def test_rp_issue_system_url_warning(mock_client_init): def test_launch_uuid_print(mock_client_init): print_uuid = True variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_launch_uuid_print': str(print_uuid)}.items()) - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + variables.update({"rp_launch_uuid_print": str(print_uuid)}.items()) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) - expect(mock_client_init.call_args_list[0][1]['launch_uuid_print'] == print_uuid) - expect(mock_client_init.call_args_list[0][1]['print_output'] is None) + expect(mock_client_init.call_args_list[0][1]["launch_uuid_print"] == print_uuid) + expect(mock_client_init.call_args_list[0][1]["print_output"] is None) assert_expectations() @@ -290,12 +276,12 @@ def test_launch_uuid_print(mock_client_init): def test_launch_uuid_print_stderr(mock_client_init): print_uuid = True variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_launch_uuid_print': str(print_uuid), 'rp_launch_uuid_print_output': 'stderr'}.items()) - result = 
utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + variables.update({"rp_launch_uuid_print": str(print_uuid), "rp_launch_uuid_print_output": "stderr"}.items()) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) - expect(mock_client_init.call_args_list[0][1]['launch_uuid_print'] == print_uuid) - expect(mock_client_init.call_args_list[0][1]['print_output'] is OutputType.STDERR) + expect(mock_client_init.call_args_list[0][1]["launch_uuid_print"] == print_uuid) + expect(mock_client_init.call_args_list[0][1]["print_output"] is OutputType.STDERR) assert_expectations() @@ -303,43 +289,37 @@ def test_launch_uuid_print_stderr(mock_client_init): def test_launch_uuid_print_invalid_output(mock_client_init): print_uuid = True variables = utils.DEFAULT_VARIABLES.copy() - variables.update({'rp_launch_uuid_print': str(print_uuid), 'rp_launch_uuid_print_output': 'something'}.items()) - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 3, 'Exit code should be 3 (INTERNALERROR)' + variables.update({"rp_launch_uuid_print": str(print_uuid), "rp_launch_uuid_print_output": "something"}.items()) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 3, "Exit code should be 3 (INTERNALERROR)" assert mock_client_init.call_count == 0 @mock.patch(REPORT_PORTAL_SERVICE) def test_no_launch_uuid_print(mock_client_init): variables = utils.DEFAULT_VARIABLES.copy() - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" expect(mock_client_init.call_count == 1) - expect(mock_client_init.call_args_list[0][1]['launch_uuid_print'] is False) - expect(mock_client_init.call_args_list[0][1]['print_output'] is None) + expect(mock_client_init.call_args_list[0][1]["launch_uuid_print"] is False) + expect(mock_client_init.call_args_list[0][1]["print_output"] is None) assert_expectations() @pytest.mark.parametrize( - 'connect_value, read_value, expected_result', - [ - ('5', '15', (5.0, 15.0)), - ('5.5', '15.5', (5.5, 15.5)), - (None, None, None), - (None, '5', 5), - ('5', None, 5) - ] + "connect_value, read_value, expected_result", + [("5", "15", (5.0, 15.0)), ("5.5", "15.5", (5.5, 15.5)), (None, None, None), (None, "5", 5), ("5", None, 5)], ) @mock.patch(REPORT_PORTAL_SERVICE) def test_client_timeouts(mock_client_init, connect_value, read_value, expected_result): variables = utils.DEFAULT_VARIABLES.copy() if connect_value: - variables['rp_connect_timeout'] = connect_value + variables["rp_connect_timeout"] = connect_value if read_value: - variables['rp_read_timeout'] = read_value + variables["rp_read_timeout"] = read_value - result = utils.run_pytest_tests(['examples/test_rp_logging.py'], variables=variables) + result = utils.run_pytest_tests(["examples/test_rp_logging.py"], variables=variables) - assert int(result) == 0, 'Exit code should be 0 (no errors)' + assert int(result) == 0, "Exit code should be 0 (no errors)" assert mock_client_init.call_count == 1 - assert mock_client_init.call_args_list[0][1]['http_timeout'] == expected_result + assert 
diff --git a/tests/integration/test_connection_close.py b/tests/integration/test_connection_close.py
index 33be309..bf6cf9c 100644
--- a/tests/integration/test_connection_close.py
+++ b/tests/integration/test_connection_close.py
@@ -21,7 +21,7 @@ def test_connection_close(mock_client_init):
     mock_client = mock_client_init.return_value
 
-    result = utils.run_tests_with_client(mock_client, ['examples/test_rp_logging.py'])
+    result = utils.run_tests_with_client(mock_client, ["examples/test_rp_logging.py"])
 
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
     assert mock_client.close.call_count == 1, '"close" method was not called at the end of the test'
diff --git a/tests/integration/test_custom_name.py b/tests/integration/test_custom_name.py
index d0c1c98..16699fd 100644
--- a/tests/integration/test_custom_name.py
+++ b/tests/integration/test_custom_name.py
@@ -23,15 +23,18 @@ from tests.helpers import utils
 
 
-@pytest.mark.parametrize('test, expected', [
-    ('examples/custom_name/test_custom_name_args.py', TEST_NAME_ARGS),
-    ('examples/custom_name/test_custom_name_kwargs.py', TEST_NAME_KWARGS),
-    ('examples/custom_name/test_custom_name_empty.py', TEST_NAME_EMPTY)
-])
+@pytest.mark.parametrize(
+    "test, expected",
+    [
+        ("examples/custom_name/test_custom_name_args.py", TEST_NAME_ARGS),
+        ("examples/custom_name/test_custom_name_kwargs.py", TEST_NAME_KWARGS),
+        ("examples/custom_name/test_custom_name_empty.py", TEST_NAME_EMPTY),
+    ],
+)
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_custom_attribute_report(mock_client_init, test, expected):
     result = utils.run_pytest_tests(tests=[test], variables=utils.DEFAULT_VARIABLES)
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     mock_client = mock_client_init.return_value
     start_count = mock_client.start_test_item.call_count
@@ -40,5 +43,5 @@ def test_custom_attribute_report(mock_client_init, test, expected):
 
     call_args = mock_client.start_test_item.call_args_list
     step_call_args = call_args[0][1]
-    assert step_call_args['name'] == expected, 'Incorrect item name'
-    assert step_call_args['attributes'] == [], 'No attributes should be added for the test item'
+    assert step_call_args["name"] == expected, "Incorrect item name"
+    assert step_call_args["attributes"] == [], "No attributes should be added for the test item"
diff --git a/tests/integration/test_debug_mode.py b/tests/integration/test_debug_mode.py
index d380b31..c7b6563 100644
--- a/tests/integration/test_debug_mode.py
+++ b/tests/integration/test_debug_mode.py
@@ -22,11 +22,7 @@
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
-@pytest.mark.parametrize(['mode', 'expected_mode'], [
-    ('DEFAULT', 'DEFAULT'),
-    ('DEBUG', 'DEBUG'),
-    (None, 'DEFAULT')
-])
+@pytest.mark.parametrize(["mode", "expected_mode"], [("DEFAULT", "DEFAULT"), ("DEBUG", "DEBUG"), (None, "DEFAULT")])
 def test_launch_mode(mock_client_init, mode, expected_mode):
     """Verify different launch modes are passed to `start_launch` method.
@@ -37,13 +33,13 @@ def test_launch_mode(mock_client_init, mode, expected_mode):
     """
     variables = dict()
     if mode is not None:
-        variables['rp_mode'] = mode
+        variables["rp_mode"] = mode
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/test_simple.py'], variables=variables)
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables)
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     assert mock_client_init.call_count == 1, "client wasn't initialized"
 
     init_kwargs = mock_client_init.call_args_list[0][1]
-    assert 'mode' in init_kwargs
-    assert init_kwargs['mode'] == expected_mode
+    assert "mode" in init_kwargs
+    assert init_kwargs["mode"] == expected_mode
diff --git a/tests/integration/test_empty_run.py b/tests/integration/test_empty_run.py
index b5e5b1a..e85b4c7 100644
--- a/tests/integration/test_empty_run.py
+++ b/tests/integration/test_empty_run.py
@@ -13,9 +13,10 @@
 
 """This module includes integration tests for the empty run."""
 
-from delayed_assert import expect, assert_expectations
 from unittest import mock
 
+from delayed_assert import assert_expectations, expect
+
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
@@ -26,9 +27,9 @@ def test_empty_run(mock_client_init):
 
     :param mock_client_init: Pytest fixture
     """
-    result = utils.run_pytest_tests(tests=['examples/empty/'])
+    result = utils.run_pytest_tests(tests=["examples/empty/"])
 
-    assert int(result) == 5, 'Exit code should be 5 (no tests)'
+    assert int(result) == 5, "Exit code should be 5 (no tests)"
 
     mock_client = mock_client_init.return_value
     expect(mock_client.start_launch.call_count == 1, '"start_launch" method was not called')
@@ -36,7 +37,7 @@ def test_empty_run(mock_client_init):
     assert_expectations()
 
     finish_args = mock_client.finish_launch.call_args_list
-    expect('status' not in finish_args[0][1], 'Launch status should not be defined')
-    launch_end_time = finish_args[0][1]['end_time']
-    expect(launch_end_time is not None and int(launch_end_time) > 0, 'Launch end time is empty')
+    expect("status" not in finish_args[0][1], "Launch status should not be defined")
+    launch_end_time = finish_args[0][1]["end_time"]
+    expect(launch_end_time is not None and int(launch_end_time) > 0, "Launch end time is empty")
     assert_expectations()
diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py
index d8d017c..c51dc70 100644
--- a/tests/integration/test_fixtures.py
+++ b/tests/integration/test_fixtures.py
@@ -22,13 +22,20 @@
 
 from examples.fixtures.test_failure_fixture_teardown.conftest import (
     LOG_MESSAGE_BEFORE_YIELD as LOG_MESSAGE_BEFORE_YIELD_TEST_FAILURE,
-    LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_TEST_FAILURE)
+)
+from examples.fixtures.test_failure_fixture_teardown.conftest import (
+    LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_TEST_FAILURE,
+)
 from examples.fixtures.test_fixture_return_none.conftest import LOG_MESSAGE_SETUP as LOG_MESSAGE_BEFORE_RETURN_NONE
 from examples.fixtures.test_fixture_setup.conftest import LOG_MESSAGE_SETUP as SINGLE_SETUP_MESSAGE
 from examples.fixtures.test_fixture_setup_failure.conftest import LOG_MESSAGE_SETUP as LOG_MESSAGE_SETUP_FAILURE
 from examples.fixtures.test_fixture_teardown.conftest import LOG_MESSAGE_BEFORE_YIELD, LOG_MESSAGE_TEARDOWN
 from examples.fixtures.test_fixture_teardown_failure.conftest import (
-    LOG_MESSAGE_BEFORE_YIELD as LOG_MESSAGE_BEFORE_YIELD_FAILURE, LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_FAILURE)
+    LOG_MESSAGE_BEFORE_YIELD as LOG_MESSAGE_BEFORE_YIELD_FAILURE,
+)
+from examples.fixtures.test_fixture_teardown_failure.conftest import (
+    LOG_MESSAGE_TEARDOWN as LOG_MESSAGE_TEARDOWN_FAILURE,
+)
 from examples.fixtures.test_fixture_yield_none.conftest import LOG_MESSAGE_SETUP as LOG_MESSAGE_BEFORE_YIELD_NONE
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
@@ -41,9 +48,9 @@ def generate_item_id(*args, **kwargs) -> str:
 
     if args:
         name = args[0]
     else:
-        name = kwargs['name']
+        name = kwargs["name"]
     ITEM_ID_DICT[name] += 1
-    item_id = f'{name}_{ITEM_ID_DICT[name]}'
+    item_id = f"{name}_{ITEM_ID_DICT[name]}"
     ITEM_ID_LIST.append(item_id)
     return item_id
@@ -72,38 +79,39 @@ def setup_mock_for_logging(mock_client_init):
     return mock_client
 
 
-@pytest.mark.parametrize('switch', [True, False])
+@pytest.mark.parametrize("switch", [True, False])
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_on_off(mock_client_init, switch):
     mock_client = setup_mock(mock_client_init)
     variables = dict(utils.DEFAULT_VARIABLES)
-    variables['rp_report_fixtures'] = switch
-    result = utils.run_pytest_tests(tests=['examples/fixtures/test_fixture_teardown'], variables=variables)
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    variables["rp_report_fixtures"] = switch
+    result = utils.run_pytest_tests(tests=["examples/fixtures/test_fixture_teardown"], variables=variables)
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     start_count = mock_client.start_test_item.call_count
     finish_count = mock_client.finish_test_item.call_count
     expected_count = 3 if switch else 1
-    assert start_count == finish_count == expected_count, \
-        'Incorrect number of "start_test_item" or "finish_test_item" calls'
+    assert (
+        start_count == finish_count == expected_count
+    ), 'Incorrect number of "start_test_item" or "finish_test_item" calls'
 
 
 def run_tests(test_path, should_fail=False):
     variables = dict(utils.DEFAULT_VARIABLES)
-    variables['rp_report_fixtures'] = True
+    variables["rp_report_fixtures"] = True
     result = utils.run_pytest_tests(tests=[test_path], variables=variables)
     if should_fail:
-        assert int(result) == 1, 'Exit code should be 1 (test failure)'
+        assert int(result) == 1, "Exit code should be 1 (test failure)"
     else:
-        assert int(result) == 0, 'Exit code should be 0 (no errors)'
+        assert int(result) == 0, "Exit code should be 0 (no errors)"
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_setup(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_setup'
+    test_path = "examples/fixtures/test_fixture_setup"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -113,17 +121,17 @@ def test_fixture_setup(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    step_name = f'function fixture setup: {fixture_name}'
+    step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    assert teardown_call_args[0] == f'function fixture teardown: {fixture_name}'
+    assert teardown_call_args[0] == f"function fixture teardown: {fixture_name}"
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 1, 'Incorrect number of "log" calls'
@@ -133,14 +141,14 @@ def test_fixture_setup(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == SINGLE_SETUP_MESSAGE
-    assert log_call_kwargs['item_id'] == f'{step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{step_name}_1"
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_teardown(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_teardown'
+    test_path = "examples/fixtures/test_fixture_teardown"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -150,18 +158,18 @@ def test_fixture_teardown(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    setup_step_name = f'function fixture setup: {fixture_name}'
+    setup_step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == setup_step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    teardown_step_name = f'function fixture teardown: {fixture_name}'
+    teardown_step_name = f"function fixture teardown: {fixture_name}"
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 2, 'Incorrect number of "log" calls'
@@ -171,22 +179,24 @@ def test_fixture_teardown(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD
-    assert log_call_kwargs['item_id'] == f'{setup_step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{setup_step_name}_1"
 
     log_call_args = log_call_args_list[-1][0]
     log_call_kwargs = log_call_args_list[-1][1]
 
     assert log_call_args[1] == LOG_MESSAGE_TEARDOWN
-    assert log_call_kwargs['item_id'] == \
-           'examples/fixtures/test_fixture_teardown/test_fixture_teardown.py::test_fixture_teardown_1'
+    assert (
+        log_call_kwargs["item_id"]
+        == "examples/fixtures/test_fixture_teardown/test_fixture_teardown.py::test_fixture_teardown_1"
+    )
 
 
-@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions')
+@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions")
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_setup_failure(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_setup_failure'
+    test_path = "examples/fixtures/test_fixture_setup_failure"
     run_tests(test_path, True)
 
     start_count = mock_client.start_test_item.call_count
@@ -196,11 +206,11 @@ def test_fixture_setup_failure(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    step_name = f'function fixture setup: {fixture_name}'
+    step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 2, 'Incorrect number of "log" calls'
@@ -210,21 +220,24 @@ def test_fixture_setup_failure(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == LOG_MESSAGE_SETUP_FAILURE
-    assert log_call_kwargs['item_id'] == f'{step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{step_name}_1"
 
     log_call_kwargs = log_call_args_list[1][1]
-    assert log_call_kwargs['message'].endswith(
-        'examples/fixtures/test_fixture_setup_failure/conftest.py:30: Exception')
-    assert log_call_kwargs['item_id'] == \
-           'examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py::test_fixture_setup_failure_1'
+    assert log_call_kwargs["message"].endswith(
+        "examples/fixtures/test_fixture_setup_failure/conftest.py:30: Exception"
+    )
+    assert (
+        log_call_kwargs["item_id"]
+        == "examples/fixtures/test_fixture_setup_failure/test_fixture_setup_failure.py::test_fixture_setup_failure_1"
+    )
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_teardown_failure(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_teardown_failure'
+    test_path = "examples/fixtures/test_fixture_teardown_failure"
     run_tests(test_path, True)
 
     start_count = mock_client.start_test_item.call_count
@@ -234,18 +247,18 @@ def test_fixture_teardown_failure(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    setup_step_name = f'function fixture setup: {fixture_name}'
+    setup_step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == setup_step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    teardown_step_name = f'function fixture teardown: {fixture_name}'
+    teardown_step_name = f"function fixture teardown: {fixture_name}"
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 3, 'Incorrect number of "log" calls'
@@ -255,30 +268,33 @@ def test_fixture_teardown_failure(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD_FAILURE
-    assert log_call_kwargs['item_id'] == f'{setup_step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{setup_step_name}_1"
 
     log_call_args = log_call_args_list[1][0]
     log_call_kwargs = log_call_args_list[1][1]
 
     assert log_call_args[1] == LOG_MESSAGE_TEARDOWN_FAILURE
-    assert log_call_kwargs['item_id'] == \
-           ('examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::'
-            'test_fixture_teardown_failure_1')
+    assert log_call_kwargs["item_id"] == (
+        "examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::"
+        "test_fixture_teardown_failure_1"
+    )
 
     log_call_kwargs = log_call_args_list[2][1]
-    assert log_call_kwargs['message'].endswith(
-        'examples/fixtures/test_fixture_teardown_failure/conftest.py:34: Exception')
-    assert log_call_kwargs['item_id'] == \
-           ('examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::'
-            'test_fixture_teardown_failure_1')
+    assert log_call_kwargs["message"].endswith(
+        "examples/fixtures/test_fixture_teardown_failure/conftest.py:34: Exception"
+    )
+    assert log_call_kwargs["item_id"] == (
+        "examples/fixtures/test_fixture_teardown_failure/test_fixture_teardown_failure.py::"
+        "test_fixture_teardown_failure_1"
+    )
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_yield_none(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_yield_none'
+    test_path = "examples/fixtures/test_fixture_yield_none"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -288,18 +304,18 @@ def test_fixture_yield_none(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    setup_step_name = f'function fixture setup: {fixture_name}'
+    setup_step_name = f"function fixture setup: {fixture_name}"
    assert setup_call_args[0] == setup_step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    teardown_step_name = f'function fixture teardown: {fixture_name}'
+    teardown_step_name = f"function fixture teardown: {fixture_name}"
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 1, 'Incorrect number of "log" calls'
@@ -309,14 +325,14 @@ def test_fixture_yield_none(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD_NONE
-    assert log_call_kwargs['item_id'] == f'{setup_step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{setup_step_name}_1"
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_return_none(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_return_none'
+    test_path = "examples/fixtures/test_fixture_return_none"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -326,18 +342,18 @@ def test_fixture_return_none(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    setup_step_name = f'function fixture setup: {fixture_name}'
+    setup_step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == setup_step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    teardown_step_name = f'function fixture teardown: {fixture_name}'
+    teardown_step_name = f"function fixture teardown: {fixture_name}"
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 1, 'Incorrect number of "log" calls'
@@ -347,14 +363,14 @@ def test_fixture_return_none(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == LOG_MESSAGE_BEFORE_RETURN_NONE
-    assert log_call_kwargs['item_id'] == f'{setup_step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{setup_step_name}_1"
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_failure_fixture_teardown(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_failure_fixture_teardown'
+    test_path = "examples/fixtures/test_failure_fixture_teardown"
     run_tests(test_path, True)
 
     start_count = mock_client.start_test_item.call_count
@@ -364,18 +380,18 @@ def test_failure_fixture_teardown(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    setup_step_name = f'function fixture setup: {fixture_name}'
+    setup_step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == setup_step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    teardown_step_name = f'function fixture teardown: {fixture_name}'
+    teardown_step_name = f"function fixture teardown: {fixture_name}"
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 3, 'Incorrect number of "log" calls'
@@ -385,31 +401,34 @@ def test_failure_fixture_teardown(mock_client_init):
 
     log_call_kwargs = log_call_args_list[0][1]
 
     assert log_call_args[1] == LOG_MESSAGE_BEFORE_YIELD_TEST_FAILURE
-    assert log_call_kwargs['item_id'] == f'{setup_step_name}_1'
+    assert log_call_kwargs["item_id"] == f"{setup_step_name}_1"
 
     log_call_args = log_call_args_list[2][0]
     log_call_kwargs = log_call_args_list[2][1]
 
     assert log_call_args[1] == LOG_MESSAGE_TEARDOWN_TEST_FAILURE
-    assert log_call_kwargs['item_id'] == \
-           ('examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::'
-            'test_failure_fixture_teardown_1')
+    assert log_call_kwargs["item_id"] == (
+        "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::"
+        "test_failure_fixture_teardown_1"
+    )
 
     log_call_kwargs = log_call_args_list[1][1]
-    assert log_call_kwargs['message'].endswith(
-        'examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py:28: AssertionError')
-    assert log_call_kwargs['item_id'] == \
-           ('examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::'
-            'test_failure_fixture_teardown_1')
+    assert log_call_kwargs["message"].endswith(
+        "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py:28: AssertionError"
+    )
+    assert log_call_kwargs["item_id"] == (
+        "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::"
+        "test_failure_fixture_teardown_1"
+    )
 
 
-@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions')
+@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions")
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_session_fixture_setup(mock_client_init):
     mock_client = setup_mock(mock_client_init)
-    test_path = 'examples/fixtures/session_fixture_return'
+    test_path = "examples/fixtures/session_fixture_return"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -419,25 +438,25 @@ def test_session_fixture_setup(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    step_name = f'session fixture setup: {fixture_name}'
+    step_name = f"session fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    assert teardown_call_args[0] == f'session fixture teardown: {fixture_name}'
+    assert teardown_call_args[0] == f"session fixture teardown: {fixture_name}"
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
 
-@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions')
+@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions")
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_package_fixture_setup(mock_client_init):
     mock_client = setup_mock(mock_client_init)
-    test_path = 'examples/fixtures/package_fixture_return'
+    test_path = "examples/fixtures/package_fixture_return"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -447,25 +466,25 @@ def test_package_fixture_setup(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    step_name = f'package fixture setup: {fixture_name}'
+    step_name = f"package fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    assert teardown_call_args[0] == f'package fixture teardown: {fixture_name}'
+    assert teardown_call_args[0] == f"package fixture teardown: {fixture_name}"
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
 
-@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions')
+@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions")
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_module_fixture_setup(mock_client_init):
     mock_client = setup_mock(mock_client_init)
-    test_path = 'examples/fixtures/module_fixture_return'
+    test_path = "examples/fixtures/module_fixture_return"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -475,25 +494,25 @@ def test_module_fixture_setup(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    step_name = f'module fixture setup: {fixture_name}'
+    step_name = f"module fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
-    assert teardown_call_args[0] == f'module fixture teardown: {fixture_name}'
+    assert teardown_call_args[0] == f"module fixture teardown: {fixture_name}"
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
 
-@pytest.mark.skipif(sys.version_info < (3, 8), reason='Python 3.8+ required due to bugs in older versions')
+@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions")
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_class_fixture_setup(mock_client_init):
     mock_client = setup_mock(mock_client_init)
-    test_path = 'examples/fixtures/class_fixture_return'
+    test_path = "examples/fixtures/class_fixture_return"
     run_tests(test_path)
 
     start_count = mock_client.start_test_item.call_count
@@ -503,50 +522,50 @@ def test_class_fixture_setup(mock_client_init):
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[1][0]
     fixture_name = f'{test_path.split("/")[-1]}_config'
-    step_name = f'class fixture setup: {fixture_name}'
+    step_name = f"class fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     setup_call_args = call_args[-3][0]
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[-3][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
-    teardown_step_name = f'class fixture teardown: {fixture_name}'
+    teardown_step_name = f"class fixture teardown: {fixture_name}"
     teardown_call_args = call_args[-5][0]
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-5][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     teardown_call_args = call_args[-1][0]
     assert teardown_call_args[0] == teardown_step_name
 
     setup_call_kwargs = call_args[-1][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_fixture_setup_skip(mock_client_init):
     mock_client = setup_mock_for_logging(mock_client_init)
-    test_path = 'examples/fixtures/test_fixture_skipped/test_fixture_skipped.py'
+    test_path = "examples/fixtures/test_fixture_skipped/test_fixture_skipped.py"
     run_tests(test_path, False)
 
     call_args = mock_client.start_test_item.call_args_list
     setup_call_args = call_args[2][0]
-    fixture_name = 'skip_fixture'
-    step_name = f'function fixture setup: {fixture_name}'
+    fixture_name = "skip_fixture"
+    step_name = f"function fixture setup: {fixture_name}"
     assert setup_call_args[0] == step_name
 
     setup_call_kwargs = call_args[2][1]
-    assert not setup_call_kwargs['has_stats']
+    assert not setup_call_kwargs["has_stats"]
 
     log_count = mock_client.log.call_count
     assert log_count == 1, 'Incorrect number of "log" calls'
 
     call_args = mock_client.finish_test_item.call_args_list
     finish_call_kwargs = call_args[1][1]
-    assert finish_call_kwargs['status'] == 'PASSED'
+    assert finish_call_kwargs["status"] == "PASSED"
 
     finish_call_kwargs = call_args[-1][1]
-    assert finish_call_kwargs['status'] == 'SKIPPED'
+    assert finish_call_kwargs["status"] == "SKIPPED"
diff --git a/tests/integration/test_issue_report.py b/tests/integration/test_issue_report.py
index dde357d..bdf0476 100644
--- a/tests/integration/test_issue_report.py
+++ b/tests/integration/test_issue_report.py
@@ -16,7 +16,7 @@
 from unittest import mock
 
 import pytest
-from delayed_assert import expect, assert_expectations
+from delayed_assert import assert_expectations, expect
 from reportportal_client.core.rp_issues import Issue
 
 from examples import test_issue_id
@@ -24,14 +24,14 @@
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
 
-ISSUE_PLACEHOLDER = '{issue_id}'
-ISSUE_URL_PATTERN = f'https://bugzilla.some.com/show_bug.cgi?id={ISSUE_PLACEHOLDER}'
-BTS_PROJECT = 'RP-TEST'
-BTS_URL = 'https://bugzilla.some.com'
+ISSUE_PLACEHOLDER = "{issue_id}"
+ISSUE_URL_PATTERN = f"https://bugzilla.some.com/show_bug.cgi?id={ISSUE_PLACEHOLDER}"
+BTS_PROJECT = "RP-TEST"
+BTS_URL = "https://bugzilla.some.com"
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
-@pytest.mark.parametrize('issue_id_mark', [True, False])
+@pytest.mark.parametrize("issue_id_mark", [True, False])
 def test_issue_id_attribute(mock_client_init, issue_id_mark):
     """Verify agent reports issue attribute if configured.
@@ -42,20 +42,20 @@ def test_issue_id_attribute(mock_client_init, issue_id_mark):
     mock_client.start_test_item.side_effect = utils.item_id_gen
     mock_client.get_project_settings.side_effect = utils.project_settings
 
-    variables = {'rp_issue_id_marks': issue_id_mark}
+    variables = {"rp_issue_id_marks": issue_id_mark}
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/test_issue_id.py'], variables=variables)
-    assert int(result) == 1, 'Exit code should be 1 (test failed)'
+    result = utils.run_pytest_tests(tests=["examples/test_issue_id.py"], variables=variables)
+    assert int(result) == 1, "Exit code should be 1 (test failed)"
 
     call_args = mock_client.start_test_item.call_args_list
     finish_test_step = call_args[-1][1]
-    attributes = finish_test_step['attributes']
+    attributes = finish_test_step["attributes"]
     if issue_id_mark:
         assert len(attributes) == 1
         issue_attribute = attributes[0]
-        expect(issue_attribute['key'] == 'issue')
-        expect(issue_attribute['value'] == test_issue_id.ID)
+        expect(issue_attribute["key"] == "issue")
+        expect(issue_attribute["value"] == test_issue_id.ID)
         assert_expectations()
     else:
         assert len(attributes) == 0
@@ -71,24 +71,25 @@ def test_issue_report(mock_client_init):
     mock_client.start_test_item.side_effect = utils.item_id_gen
     mock_client.get_project_settings.side_effect = utils.project_settings
 
-    variables = {'rp_issue_system_url': ISSUE_URL_PATTERN}
+    variables = {"rp_issue_system_url": ISSUE_URL_PATTERN}
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/test_issue_id.py'], variables=variables)
-    assert int(result) == 1, 'Exit code should be 1 (test failed)'
+    result = utils.run_pytest_tests(tests=["examples/test_issue_id.py"], variables=variables)
+    assert int(result) == 1, "Exit code should be 1 (test failed)"
 
     call_args = mock_client.finish_test_item.call_args_list
     finish_test_step = call_args[0][1]
-    issue = finish_test_step['issue']
+    issue = finish_test_step["issue"]
     assert isinstance(issue, Issue)
-    expect(issue.issue_type == 'pb001')
+    expect(issue.issue_type == "pb001")
     expect(issue.comment is not None)
     assert_expectations()
 
-    comments = issue.comment.split('\n')
+    comments = issue.comment.split("\n")
     assert len(comments) == 1
     comment = comments[0]
     assert comment == "* {}: [{}]({})".format(
-        test_issue_id.REASON, test_issue_id.ID, ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID))
+        test_issue_id.REASON, test_issue_id.ID, ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID)
+    )
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
@@ -101,21 +102,17 @@ def test_passed_no_issue_report(mock_client_init):
     mock_client.start_test_item.side_effect = utils.item_id_gen
     mock_client.get_project_settings.side_effect = utils.project_settings
 
-    variables = {'rp_issue_system_url': ISSUE_URL_PATTERN}
+    variables = {"rp_issue_system_url": ISSUE_URL_PATTERN}
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/test_issue_id_pass.py'], variables=variables)
-    assert int(result) == 0, 'Exit code should be 0 (no failures)'
+    result = utils.run_pytest_tests(tests=["examples/test_issue_id_pass.py"], variables=variables)
+    assert int(result) == 0, "Exit code should be 0 (no failures)"
 
     call_args = mock_client.finish_test_item.call_args_list
     finish_test_step = call_args[0][1]
-    assert 'issue' not in finish_test_step or finish_test_step['issue'] is None
+    assert "issue" not in finish_test_step or finish_test_step["issue"] is None
 
 
-@pytest.mark.parametrize(('flag_value', 'expected_issue'), [
-    (True, None),
-    (False, NOT_ISSUE),
-    (None, None)
-])
+@pytest.mark.parametrize(("flag_value", "expected_issue"), [(True, None), (False, NOT_ISSUE), (None, None)])
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_skipped_not_issue(mock_client_init, flag_value, expected_issue):
     """Verify 'rp_is_skipped_an_issue' option handling.
@@ -129,15 +126,15 @@ def test_skipped_not_issue(mock_client_init, flag_value, expected_issue):
 
     variables = dict()
     if flag_value is not None:
-        variables['rp_is_skipped_an_issue'] = flag_value
+        variables["rp_is_skipped_an_issue"] = flag_value
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/skip/test_simple_skip.py'], variables=variables)
+    result = utils.run_pytest_tests(tests=["examples/skip/test_simple_skip.py"], variables=variables)
 
-    assert int(result) == 0, 'Exit code should be 0 (no failures)'
+    assert int(result) == 0, "Exit code should be 0 (no failures)"
     call_args = mock_client.finish_test_item.call_args_list
     finish_test_step = call_args[0][1]
-    actual_issue = finish_test_step.get('issue', None)
+    actual_issue = finish_test_step.get("issue", None)
     assert actual_issue == expected_issue
 
 
@@ -152,18 +149,18 @@ def test_skipped_custom_issue(mock_client_init):
     mock_client.get_project_settings.side_effect = utils.project_settings
 
     variables = dict()
-    variables['rp_is_skipped_an_issue'] = True
-    variables['rp_issue_system_url'] = ISSUE_URL_PATTERN
+    variables["rp_is_skipped_an_issue"] = True
+    variables["rp_issue_system_url"] = ISSUE_URL_PATTERN
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/skip/test_skip_issue.py'], variables=variables)
+    result = utils.run_pytest_tests(tests=["examples/skip/test_skip_issue.py"], variables=variables)
 
-    assert int(result) == 0, 'Exit code should be 0 (no failures)'
+    assert int(result) == 0, "Exit code should be 0 (no failures)"
     call_args = mock_client.finish_test_item.call_args_list
     finish_test_step = call_args[0][1]
-    actual_issue = finish_test_step.get('issue', None)
+    actual_issue = finish_test_step.get("issue", None)
     assert isinstance(actual_issue, Issue)
-    expect(actual_issue.issue_type == 'pb001')
+    expect(actual_issue.issue_type == "pb001")
     expect(actual_issue.comment is not None)
     assert_expectations()
 
@@ -178,28 +175,24 @@ def test_external_issue(mock_client_init):
     mock_client.start_test_item.side_effect = utils.item_id_gen
     mock_client.get_project_settings.side_effect = utils.project_settings
 
-    variables = {
-        'rp_bts_project': BTS_PROJECT,
-        'rp_bts_url': BTS_URL,
-        'rp_bts_issue_url': ISSUE_URL_PATTERN
-    }
+    variables = {"rp_bts_project": BTS_PROJECT, "rp_bts_url": BTS_URL, "rp_bts_issue_url": ISSUE_URL_PATTERN}
     variables.update(utils.DEFAULT_VARIABLES.items())
-    result = utils.run_pytest_tests(tests=['examples/test_issue_id.py'], variables=variables)
+    result = utils.run_pytest_tests(tests=["examples/test_issue_id.py"], variables=variables)
 
-    assert int(result) == 1, 'Exit code should be 1 (test failed)'
+    assert int(result) == 1, "Exit code should be 1 (test failed)"
     call_args = mock_client.finish_test_item.call_args_list
     finish_test_step = call_args[0][1]
-    actual_issue = finish_test_step.get('issue', None)
+    actual_issue = finish_test_step.get("issue", None)
     assert isinstance(actual_issue, Issue)
-    expect(actual_issue.issue_type == 'pb001')
+    expect(actual_issue.issue_type == "pb001")
     expect(actual_issue.comment is not None)
     external_issues = actual_issue._external_issues
     expect(len(external_issues) == 1)
     assert_expectations()
 
     external_issue = external_issues[0]
-    expect(external_issue['btsUrl'] == BTS_URL)
-    expect(external_issue['btsProject'] == BTS_PROJECT)
-    expect(external_issue['ticketId'] == test_issue_id.ID)
-    expect(external_issue['url'] == ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID))
+    expect(external_issue["btsUrl"] == BTS_URL)
+    expect(external_issue["btsProject"] == BTS_PROJECT)
+    expect(external_issue["ticketId"] == test_issue_id.ID)
+    expect(external_issue["url"] == ISSUE_URL_PATTERN.replace(ISSUE_PLACEHOLDER, test_issue_id.ID))
     assert_expectations()
diff --git a/tests/integration/test_max_name_length.py b/tests/integration/test_max_name_length.py
index 11a1b9f..51430db 100644
--- a/tests/integration/test_max_name_length.py
+++ b/tests/integration/test_max_name_length.py
@@ -20,8 +20,8 @@
 
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_custom_attribute_report(mock_client_init):
-    result = utils.run_pytest_tests(tests=['examples/test_max_item_name.py'], variables=utils.DEFAULT_VARIABLES)
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    result = utils.run_pytest_tests(tests=["examples/test_max_item_name.py"], variables=utils.DEFAULT_VARIABLES)
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     mock_client = mock_client_init.return_value
     start_count = mock_client.start_test_item.call_count
@@ -30,4 +30,4 @@ def test_custom_attribute_report(mock_client_init):
 
     call_args = mock_client.start_test_item.call_args_list
     step_call_args = call_args[0][1]
-    assert len(step_call_args['name']) == 1024, 'Incorrect item name length'
+    assert len(step_call_args["name"]) == 1024, "Incorrect item name length"
diff --git a/tests/integration/test_parameters_report.py b/tests/integration/test_parameters_report.py
index e4a7cba..b5ac571 100644
--- a/tests/integration/test_parameters_report.py
+++ b/tests/integration/test_parameters_report.py
@@ -13,22 +13,28 @@
 
 """This module includes integration tests for parameters report."""
 
-import pytest
 from unittest import mock
 
+import pytest
+
 from examples.params.test_binary_symbol_in_parameters import BINARY_TEXT
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
-@pytest.mark.parametrize(['test', 'expected_params'], [
-    ('examples/test_simple.py', None),
-    ('examples/params/test_in_class_parameterized.py', {'param': 'param'}),
-    ('examples/params/test_different_parameter_types.py',
-     {'integer': 1, 'floating_point': 1.5, 'boolean': True, 'none': None}),
-    ('examples/params/test_binary_symbol_in_parameters.py', {'text': BINARY_TEXT.replace('\0', '\\0')}),
-])
+@pytest.mark.parametrize(
+    ["test", "expected_params"],
+    [
+        ("examples/test_simple.py", None),
+        ("examples/params/test_in_class_parameterized.py", {"param": "param"}),
+        (
+            "examples/params/test_different_parameter_types.py",
+            {"integer": 1, "floating_point": 1.5, "boolean": True, "none": None},
+        ),
+        ("examples/params/test_binary_symbol_in_parameters.py", {"text": BINARY_TEXT.replace("\0", "\\0")}),
+    ],
+)
 def test_parameters(mock_client_init, test, expected_params):
     """Verify different tests have correct parameters.
@@ -37,11 +43,11 @@ def test_parameters(mock_client_init, test, expected_params):
     :param expected_params: an expected parameter dictionary
     """
     result = utils.run_pytest_tests(tests=[test])
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     mock_client = mock_client_init.return_value
     assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times'
 
     call_args = mock_client.start_test_item.call_args_list
     step_call_args = call_args[-1][1]
-    assert step_call_args['parameters'] == expected_params
+    assert step_call_args["parameters"] == expected_params
diff --git a/tests/integration/test_pass_failed_skipped.py b/tests/integration/test_pass_failed_skipped.py
index f69c4e7..97bb4ad 100644
--- a/tests/integration/test_pass_failed_skipped.py
+++ b/tests/integration/test_pass_failed_skipped.py
@@ -16,18 +16,20 @@
 from unittest import mock
 
 import pytest
-from delayed_assert import expect, assert_expectations
+from delayed_assert import assert_expectations, expect
 
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
 
 
-@pytest.mark.parametrize(('test', 'expected_run_status', 'expected_item_status'), [
-    ('examples/test_simple.py', 0, 'PASSED'),
-    ('examples/test_simple_fail.py', 1, 'FAILED'),
-    ('examples/skip/test_simple_skip.py', 0,
-     'SKIPPED')
-])
+@pytest.mark.parametrize(
+    ("test", "expected_run_status", "expected_item_status"),
+    [
+        ("examples/test_simple.py", 0, "PASSED"),
+        ("examples/test_simple_fail.py", 1, "FAILED"),
+        ("examples/skip/test_simple_skip.py", 0, "SKIPPED"),
+    ],
+)
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_simple_tests(mock_client_init, test, expected_run_status, expected_item_status):
     """Verify a simple test creates correct structure and finishes all items.
@@ -43,25 +45,27 @@ def test_simple_tests(mock_client_init, test, expected_run_status, expected_item
     mock_client.start_test_item.side_effect = utils.item_id_gen
 
     result = utils.run_pytest_tests(tests=[test])
-    assert int(result) == expected_run_status, 'Exit code should be ' + str(expected_run_status)
+    assert int(result) == expected_run_status, "Exit code should be " + str(expected_run_status)
 
     start_call_args = mock_client.start_test_item.call_args_list
     finish_call_args = mock_client.finish_test_item.call_args_list
-    assert len(start_call_args) == len(finish_call_args), 'Number of started items should be equal to finished items'
+    assert len(start_call_args) == len(finish_call_args), "Number of started items should be equal to finished items"
 
     for i in range(len(start_call_args)):
         start_test_step = start_call_args[-1 - i][1]
         finish_test_step = finish_call_args[i][1]
 
-        expect(finish_test_step['item_id'].startswith(start_test_step['name']))
+        expect(finish_test_step["item_id"].startswith(start_test_step["name"]))
         if i == 0:
-            actual_status = finish_test_step['status']
-            expect(actual_status == expected_item_status,
-                   f'Invalid item status, actual "{actual_status}", expected: "{expected_item_status}"')
+            actual_status = finish_test_step["status"]
+            expect(
+                actual_status == expected_item_status,
+                f'Invalid item status, actual "{actual_status}", expected: "{expected_item_status}"',
+            )
 
     finish_launch_call_args = mock_client.finish_launch.call_args_list
     expect(len(finish_launch_call_args) == 1)
-    expect('end_time' in finish_launch_call_args[0][1])
-    expect(finish_launch_call_args[0][1]['end_time'] is not None)
-    expect('status' not in finish_launch_call_args[0][1])
+    expect("end_time" in finish_launch_call_args[0][1])
+    expect(finish_launch_call_args[0][1]["end_time"] is not None)
+    expect("status" not in finish_launch_call_args[0][1])
     assert_expectations()
diff --git a/tests/integration/test_pytest_parallel.py b/tests/integration/test_pytest_parallel.py
index 7007882..9c94a9b 100644
--- a/tests/integration/test_pytest_parallel.py
+++ b/tests/integration/test_pytest_parallel.py
@@ -13,17 +13,18 @@
 
 """This module includes integration tests for "pytest_parallel" plugin."""
 
-import pytest
-from delayed_assert import expect, assert_expectations
 from unittest import mock
 
+import pytest
+from delayed_assert import assert_expectations, expect
+
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
 from tests.helpers.utils import item_id_gen
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
-@pytest.mark.skip(reason='This test breaks all other tests, so only for local execution')
+@pytest.mark.skip(reason="This test breaks all other tests, so only for local execution")
 def test_pytest_parallel_threads(mock_client_init):
     """Verify "pytest_parallel" plugin run tests in two threads.
@@ -32,8 +33,8 @@ def test_pytest_parallel_threads(mock_client_init):
     mock_client = mock_client_init.return_value
     mock_client.start_test_item.side_effect = item_id_gen
 
-    result = utils.run_pytest_tests(tests=['examples/hierarchy'], args=['--tests-per-worker', '2'])
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    result = utils.run_pytest_tests(tests=["examples/hierarchy"], args=["--tests-per-worker", "2"])
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     mock_client = mock_client_init.return_value
 
@@ -42,7 +43,7 @@ def test_pytest_parallel_threads(mock_client_init):
     assert_expectations()
 
     finish_args = mock_client.finish_launch.call_args_list
-    expect(finish_args[0][1]['status'] in ('PASSED', None), 'Launch failed')
-    launch_end_time = finish_args[0][1]['end_time']
-    expect(launch_end_time is not None and int(launch_end_time) > 0, 'Launch end time is empty')
+    expect(finish_args[0][1]["status"] in ("PASSED", None), "Launch failed")
+    launch_end_time = finish_args[0][1]["end_time"]
+    expect(launch_end_time is not None and int(launch_end_time) > 0, "Launch end time is empty")
     assert_expectations()
diff --git a/tests/integration/test_suite_hierarchy.py b/tests/integration/test_suite_hierarchy.py
index 248b47f..6ac1498 100644
--- a/tests/integration/test_suite_hierarchy.py
+++ b/tests/integration/test_suite_hierarchy.py
@@ -13,29 +13,30 @@
 
 """This module includes integration tests for different suite hierarchy."""
 
-import pytest
 from unittest import mock
 
+import pytest
+
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
 from tests.integration import HIERARCHY_TEST_PARAMETERS
 
 
 def verify_start_item_parameters(mock_client, expected_items):
-    assert mock_client.start_test_item.call_count == len(expected_items), \
-        '"start_test_item" method was called incorrect number of times'
+    assert mock_client.start_test_item.call_count == len(
+        expected_items
+    ), '"start_test_item" method was called incorrect number of times'
     call_args = mock_client.start_test_item.call_args_list
     for i, call in enumerate(call_args):
         start_kwargs = call[1]
-        assert start_kwargs['name'] == expected_items[i]['name']
-        assert start_kwargs['item_type'] == expected_items[i]['item_type']
-        verification = expected_items[i]['parent_item_id']
-        assert verification(start_kwargs['parent_item_id'])
+        assert start_kwargs["name"] == expected_items[i]["name"]
+        assert start_kwargs["item_type"] == expected_items[i]["item_type"]
+        verification = expected_items[i]["parent_item_id"]
+        assert verification(start_kwargs["parent_item_id"])
 
 
-@pytest.mark.parametrize(('test', 'variables', 'expected_items'),
-                         HIERARCHY_TEST_PARAMETERS)
+@pytest.mark.parametrize(("test", "variables", "expected_items"), HIERARCHY_TEST_PARAMETERS)
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_rp_hierarchy_parameters(mock_client_init, test, variables, expected_items):
     """Verify suite hierarchy with `rp_hierarchy_dirs=True`.
@@ -46,6 +47,6 @@ def test_rp_hierarchy_parameters(mock_client_init, test, variables, expected_ite
     mock_client.start_test_item.side_effect = utils.item_id_gen
 
     result = utils.run_pytest_tests(tests=test, variables=variables)
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
 
     verify_start_item_parameters(mock_client, expected_items)
diff --git a/tests/integration/test_threads_logs.py b/tests/integration/test_threads_logs.py
index d6814f4..ae19161 100644
--- a/tests/integration/test_threads_logs.py
+++ b/tests/integration/test_threads_logs.py
@@ -28,17 +28,14 @@ def test_rp_thread_logs_reporting(mock_client_init):
 
     def init_thread_client(*_, **__):
         from reportportal_client import set_current
+
         set_current(mock_thread_client)
         return mock_thread_client
 
     mock_client.clone.side_effect = init_thread_client
 
-    result = utils.run_tests_with_client(
-        mock_client,
-        ['examples/threads/'],
-        args=["--rp-thread-logging"]
-    )
+    result = utils.run_tests_with_client(mock_client, ["examples/threads/"], args=["--rp-thread-logging"])
 
-    assert int(result) == 0, 'Exit code should be 0 (no errors)'
+    assert int(result) == 0, "Exit code should be 0 (no errors)"
     assert mock_client.start_launch.call_count == 1, '"start_launch" method was not called'
     assert mock_client.log.call_count == 1
     assert mock_thread_client.log.call_count == 2
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 1248b81..d7fd093 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -13,26 +13,27 @@
 
 """This module contains common Pytest fixtures and hooks for unit tests."""
 
+# noinspection PyUnresolvedReferences
+from unittest import mock
+
 import py
 from _pytest.config import Config
 from _pytest.main import Session
 from pluggy._tracing import TagTracer
-from pytest import fixture, Module
-# noinspection PyUnresolvedReferences
-from unittest import mock
-
+from pytest import Module, fixture
 from reportportal_client import RPLogger
+
 from pytest_reportportal.config import AgentConfig
 from pytest_reportportal.service import PyTestServiceClass
 from tests import REPORT_PORTAL_SERVICE
 
-ITEM_PATH = py.path.local('examples/test_simple.py')
+ITEM_PATH = py.path.local("examples/test_simple.py")
 
 
 @fixture
 def logger():
     """Prepare instance of the RPLogger for testing."""
-    return RPLogger('pytest_reportportal.test')
+    return RPLogger("pytest_reportportal.test")
 
 
 @fixture()
@@ -40,41 +41,35 @@ def mocked_config():
     """Mock Pytest config for testing."""
     mocked_config = mock.create_autospec(Config)
-    mocked_config.getoption_side_effects = {
-        '--collect-only': False,
-        '--setup-plan': False,
-        'rp_log_level': 'debug'
-    }
+    mocked_config.getoption_side_effects = {"--collect-only": False, "--setup-plan": False, "rp_log_level": "debug"}
 
     def getoption_side_effect(name, default=None):
-        return mocked_config.getoption_side_effects.get(
-            name, default if default else mock.Mock()
-        )
+        return mocked_config.getoption_side_effects.get(name, default if default else mock.Mock())
 
     mocked_config._reporter_config = mock.Mock()
     mocked_config.getoption.side_effect = getoption_side_effect
     mocked_config._rp_enabled = True
-    mocked_config.rootdir = py.path.local('/path/to')
-    mocked_config.trace = TagTracer().get('root')
+    mocked_config.rootdir = py.path.local("/path/to")
+    mocked_config.trace = TagTracer().get("root")
     mocked_config.pluginmanager = mock.Mock()
     mocked_config.option = mock.create_autospec(Config)
-    mocked_config.option.rp_project = 'default_personal'
-    mocked_config.option.rp_endpoint = 'http://docker.local:8080/'
+    mocked_config.option.rp_project = "default_personal"
+    mocked_config.option.rp_endpoint = "http://docker.local:8080/"
     mocked_config.option.rp_api_key = mock.sentinel.rp_api_key
     mocked_config.option.rp_log_batch_size = -1
     mocked_config.option.retries = -1
-    mocked_config.option.rp_hierarchy_dirs_level = '0'
+    mocked_config.option.rp_hierarchy_dirs_level = "0"
     mocked_config.option.rp_rerun = False
     mocked_config.option.rp_launch_timeout = -1
     mocked_config.option.rp_thread_logging = True
-    mocked_config.option.rp_launch_uuid_print = 'False'
-    mocked_config.option.rp_launch_uuid_print_output = 'STDOUT'
-    mocked_config.option.rp_client_type = 'SYNC'
-    mocked_config.option.rp_report_fixtures = 'False'
-    mocked_config.option.rp_hierarchy_code = 'False'
-    mocked_config.option.rp_hierarchy_dirs = 'False'
-    mocked_config.option.rp_hierarchy_test_file = 'True'
-    mocked_config.option.rp_skip_connection_test = 'False'
+    mocked_config.option.rp_launch_uuid_print = "False"
+    mocked_config.option.rp_launch_uuid_print_output = "STDOUT"
+    mocked_config.option.rp_client_type = "SYNC"
+    mocked_config.option.rp_report_fixtures = "False"
+    mocked_config.option.rp_hierarchy_code = "False"
+    mocked_config.option.rp_hierarchy_dirs = "False"
+    mocked_config.option.rp_hierarchy_test_file = "True"
+    mocked_config.option.rp_skip_connection_test = "False"
     return mocked_config
@@ -91,7 +86,7 @@ def mocked_module(mocked_session):
     """Mock Pytest Module for testing."""
     mocked_module = mock.create_autospec(Module)
     mocked_module.parent = mocked_session
-    mocked_module.name = 'module'
+    mocked_module.name = "module"
     mocked_module.fspath = ITEM_PATH
     return mocked_module
 
@@ -102,7 +97,7 @@ def mocked_item(mocked_session, mocked_module):
     test_item = mock.Mock()
     test_item.session = mocked_session
     test_item.fspath = ITEM_PATH
-    name = 'test_item'
+    name = "test_item"
     test_item.name = name
     test_item.originalname = name
     test_item.parent = mocked_module
@@ -113,6 +108,6 @@ def rp_service(mocked_config):
     """Prepare instance of the PyTestServiceClass for testing."""
     service = PyTestServiceClass(AgentConfig(mocked_config))
-    with mock.patch(REPORT_PORTAL_SERVICE + '.get_project_settings'):
+    with mock.patch(REPORT_PORTAL_SERVICE + ".get_project_settings"):
         service.start()
     return service
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
index 900a840..bf70947 100644
--- a/tests/unit/test_config.py
+++ b/tests/unit/test_config.py
@@ -12,25 +12,25 @@
 # limitations under the License
 
 import pytest
+
 from pytest_reportportal.config import AgentConfig
 
 
 @pytest.mark.parametrize(
-    ['verify_ssl', 'expected_result'],
+    ["verify_ssl", "expected_result"],
     [
-        ('True', True),
-        ('False', False),
-        ('true', True),
-        ('false', False),
+        ("True", True),
+        ("False", False),
+        ("true", True),
+        ("false", False),
         (True, True),
         (False, False),
-        ('path/to/certificate', 'path/to/certificate'),
-        (None, True)
-    ]
+        ("path/to/certificate", "path/to/certificate"),
+        (None, True),
+    ],
 )
 def test_verify_ssl_true(mocked_config, verify_ssl, expected_result):
-    mocked_config.getini.side_effect = \
-        lambda x: verify_ssl if x == 'rp_verify_ssl' else None
+    mocked_config.getini.side_effect = lambda x: verify_ssl if x == "rp_verify_ssl" else None
     config = AgentConfig(mocked_config)
 
     assert config.rp_verify_ssl == expected_result
diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py
index 85745fa..a2efa71 100644
--- a/tests/unit/test_plugin.py
+++ b/tests/unit/test_plugin.py
@@ -18,21 +18,22 @@
 
 import pytest
 from _pytest.config.argparsing import Parser
-from delayed_assert import expect, assert_expectations
+from delayed_assert import assert_expectations, expect
 from reportportal_client.errors import ResponseError
 from requests.exceptions import RequestException
 
 from pytest_reportportal.config import AgentConfig
 from pytest_reportportal.plugin import (
+    FAILED_LAUNCH_WAIT,
+    MANDATORY_PARAMETER_MISSED_PATTERN,
     is_control,
     log,
     pytest_addoption,
-    pytest_configure,
     pytest_collection_finish,
-    pytest_sessionstart,
+    pytest_configure,
     pytest_sessionfinish,
+    pytest_sessionstart,
     wait_launch,
-    MANDATORY_PARAMETER_MISSED_PATTERN, FAILED_LAUNCH_WAIT
 )
 from pytest_reportportal.service import PyTestServiceClass
@@ -41,42 +42,40 @@ def test_is_control(mocked_config):
     """Test is_master() function for the correct responses."""
     mocked_config.workerinput = None
     expect(is_control(mocked_config) is False)
-    delattr(mocked_config, 'workerinput')
+    delattr(mocked_config, "workerinput")
     expect(is_control(mocked_config) is True)
     assert_expectations()
 
 
-@mock.patch('reportportal_client.logs.RPLogger.handle')
-@pytest.mark.parametrize('log_level', ('info', 'debug', 'warning', 'error'))
+@mock.patch("reportportal_client.logs.RPLogger.handle")
+@pytest.mark.parametrize("log_level", ("info", "debug", "warning", "error"))
 def test_logger_handle_attachment(mock_handler, logger, log_level):
     """Test logger call for different log levels with some text attachment."""
     log_call = getattr(logger, log_level)
-    attachment = 'Some {} attachment'.format(log_level)
+    attachment = "Some {} attachment".format(log_level)
     log_call("Some {} message".format(log_level), attachment=attachment)
-    expect(mock_handler.call_count == 1,
-           'logger.handle called more than 1 time')
-    expect(getattr(mock_handler.call_args[0][0], 'attachment') == attachment,
-           "record.attachment in args doesn't match real value")
+    expect(mock_handler.call_count == 1, "logger.handle called more than 1 time")
+    expect(
+        getattr(mock_handler.call_args[0][0], "attachment") == attachment,
+        "record.attachment in args doesn't match real value",
+    )
     assert_expectations()
 
 
-@mock.patch('reportportal_client.logs.RPLogger.handle')
-@pytest.mark.parametrize('log_level', ('info', 'debug', 'warning', 'error'))
+@mock.patch("reportportal_client.logs.RPLogger.handle")
+@pytest.mark.parametrize("log_level", ("info", "debug", "warning", "error"))
 def test_logger_handle_no_attachment(mock_handler, logger, log_level):
     """Test logger call for different log levels without any attachment."""
     log_call = getattr(logger, log_level)
-    log_call('Some {} message'.format(log_level))
-    expect(mock_handler.call_count == 1,
-           'logger.handle called more than 1 time')
-    expect(getattr(mock_handler.call_args[0][0], 'attachment') is None,
-           'record.attachment in args is not None')
+    log_call("Some {} message".format(log_level))
+    expect(mock_handler.call_count == 1, "logger.handle called more than 1 time")
+    expect(getattr(mock_handler.call_args[0][0], "attachment") is None, "record.attachment in args is not None")
     assert_expectations()
 
 
-@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock())
-@mock.patch('pytest_reportportal.plugin.PyTestServiceClass')
-def test_portal_on_maintenance(mocked_service_class, mocked_config,
-                               mocked_session):
+@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock())
+@mock.patch("pytest_reportportal.plugin.PyTestServiceClass")
+def test_portal_on_maintenance(mocked_service_class, mocked_config, mocked_session):
     """Test session configuration if RP is in maintenance mode.
 
     :param mocked_session: pytest fixture
@@ -86,14 +85,13 @@ def test_portal_on_maintenance(mocked_service_class, mocked_config,
     mocked_service = mocked_service_class.return_value
     mocked_config.py_test_service = mocked_service
-    mocked_service.start.side_effect = \
-        ResponseError("Report Portal - Maintenance")
+    mocked_service.start.side_effect = ResponseError("Report Portal - Maintenance")
     pytest_sessionstart(mocked_session)
     assert mocked_config.py_test_service.rp is None
 
 
-@mock.patch('pytest_reportportal.plugin.requests.Session.get', mock.Mock())
-@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock())
+@mock.patch("pytest_reportportal.plugin.requests.Session.get", mock.Mock())
+@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock())
 def test_pytest_configure(mocked_config):
     """Test plugin successful configuration.
@@ -103,18 +101,14 @@
     mocked_config.option.rp_project = None
     pytest_configure(mocked_config)
     expect(mocked_config._rp_enabled is True)
-    expect(
-        lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass))
+    expect(lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass))
     assert_expectations()
     mocked_config.getoption.assert_has_calls(
-        [
-            mock.call('--collect-only', default=False),
-            mock.call('--setup-plan', default=False)
-        ]
+        [mock.call("--collect-only", default=False), mock.call("--setup-plan", default=False)]
     )
 
 
-@mock.patch('pytest_reportportal.plugin.requests.get')
+@mock.patch("pytest_reportportal.plugin.requests.get")
 def test_pytest_configure_dry_run(mocked_config):
     """Test plugin configuration in case of dry-run execution."""
     mocked_config.getoption.return_value = True
@@ -122,8 +116,8 @@
     pytest_configure(mocked_config)
     assert mocked_config._rp_enabled is False
 
 
-@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock())
-@mock.patch('pytest_reportportal.plugin.log', wraps=log)
+@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock())
+@mock.patch("pytest_reportportal.plugin.log", wraps=log)
 def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config):
     """Test plugin configuration in case of missing rp_endpoint.
@@ -145,14 +139,15 @@
                     mocked_config.option.rp_project,
                     None,
                     mocked_config.option.rp_api_key,
-            )),
-            mock.call('Disabling reporting to RP.'),
+                )
+            ),
+            mock.call("Disabling reporting to RP."),
         ]
     )
 
 
-@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock())
-@mock.patch('pytest_reportportal.plugin.log', wraps=log)
+@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock())
+@mock.patch("pytest_reportportal.plugin.log", wraps=log)
 def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config):
     """Test plugin configuration in case of missing rp_project.
@@ -174,14 +169,15 @@
                     None,
                     mocked_config.option.rp_endpoint,
                     mocked_config.option.rp_api_key,
-            )),
-            mock.call('Disabling reporting to RP.'),
+                )
+            ),
+            mock.call("Disabling reporting to RP."),
         ]
     )
 
 
-@mock.patch('pytest_reportportal.plugin.requests.get', mock.Mock())
-@mock.patch('pytest_reportportal.plugin.log', wraps=log)
+@mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock())
+@mock.patch("pytest_reportportal.plugin.log", wraps=log)
 def test_pytest_configure_misssing_rp_uuid(mocked_log, mocked_config):
     """Test plugin configuration in case of missing rp_uuid.
@@ -203,13 +199,14 @@
                     mocked_config.option.rp_project,
                     mocked_config.option.rp_endpoint,
                     None,
-            )),
-            mock.call('Disabling reporting to RP.'),
+                )
+            ),
+            mock.call("Disabling reporting to RP."),
         ]
     )
 
 
-@mock.patch('pytest_reportportal.plugin.requests.get')
+@mock.patch("pytest_reportportal.plugin.requests.get")
 def test_pytest_configure_on_conn_error(mocked_get, mocked_config):
     """Test plugin configuration in case of HTTP error.
@@ -223,13 +220,13 @@ def test_pytest_configure_on_conn_error(mocked_get, mocked_config):
     mock_response.raise_for_status.side_effect = RequestException()
     mocked_get.return_value = mock_response
     mocked_config.option.rp_enabled = True
-    mocked_config.option.rp_skip_connection_test = 'False'
+    mocked_config.option.rp_skip_connection_test = "False"
     pytest_configure(mocked_config)
     assert mocked_config._rp_enabled is False
 
 
-@mock.patch('pytest_reportportal.plugin.LAUNCH_WAIT_TIMEOUT', 1)
-@mock.patch('pytest_reportportal.plugin.time')
+@mock.patch("pytest_reportportal.plugin.LAUNCH_WAIT_TIMEOUT", 1)
+@mock.patch("pytest_reportportal.plugin.time")
 def test_wait_launch(time_mock):
     """Test wait_launch() function for the correct behavior."""
     time_mock.time.side_effect = [0, 1, 2]
@@ -245,13 +242,11 @@ def test_pytest_collection_finish(mocked_session):
     """
     mocked_session.config.py_test_service = mock.Mock()
     pytest_collection_finish(mocked_session)
-    mocked_session.config.py_test_service.collect_tests. \
-        assert_called_with(mocked_session)
+    mocked_session.config.py_test_service.collect_tests.assert_called_with(mocked_session)
 
 
-@mock.patch('pytest_reportportal.plugin.wait_launch',
-            mock.Mock(return_value=True))
-@mock.patch('pytest_reportportal.plugin.is_control', mock.Mock())
+@mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock(return_value=True))
+@mock.patch("pytest_reportportal.plugin.is_control", mock.Mock())
 def test_pytest_sessionstart(mocked_session):
     """Test session configuration if RP plugin is correctly configured.
@@ -270,18 +265,16 @@ def test_pytest_sessionstart(mocked_session):
     assert_expectations()
 
 
-@mock.patch('pytest_reportportal.plugin.log', wraps=log)
-@mock.patch('pytest_reportportal.plugin.is_control', mock.Mock())
-@mock.patch('pytest_reportportal.plugin.wait_launch',
-            mock.Mock(return_value=False))
+@mock.patch("pytest_reportportal.plugin.log", wraps=log)
+@mock.patch("pytest_reportportal.plugin.is_control", mock.Mock())
+@mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock(return_value=False))
 def test_pytest_sessionstart_launch_wait_fail(mocked_log, mocked_session):
     """Test session configuration if RP plugin is correctly configured.
:param mocked_session: pytest fixture """ mocked_session.config.pluginmanager.hasplugin.return_value = True - mocked_session.config._reporter_config = mock.Mock( - spec=AgentConfig(mocked_session.config)) + mocked_session.config._reporter_config = mock.Mock(spec=AgentConfig(mocked_session.config)) mocked_session.config._reporter_config.rp_launch_attributes = [] mocked_session.config._reporter_config.rp_launch_id = None mocked_session.config.py_test_service = mock.Mock() @@ -289,14 +282,10 @@ def test_pytest_sessionstart_launch_wait_fail(mocked_log, mocked_session): expect(lambda: mocked_session.config.py_test_service.rp is None) expect(lambda: mocked_session.config._rp_enabled is False) assert_expectations() - mocked_log.error.assert_has_calls( - [ - mock.call(FAILED_LAUNCH_WAIT) - ] - ) + mocked_log.error.assert_has_calls([mock.call(FAILED_LAUNCH_WAIT)]) -@mock.patch('pytest_reportportal.plugin.wait_launch', mock.Mock()) +@mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock()) def test_pytest_sessionstart_xdist(mocked_session): """Test session configuration if it's worker xdist node. @@ -313,7 +302,7 @@ def test_pytest_sessionstart_xdist(mocked_session): assert_expectations() -@mock.patch('pytest_reportportal.plugin.is_control', mock.Mock()) +@mock.patch("pytest_reportportal.plugin.is_control", mock.Mock()) def test_pytest_sessionfinish(mocked_session): """Test sessionfinish with the configured RP plugin. @@ -329,47 +318,47 @@ def test_pytest_addoption_adds_correct_ini_file_arguments(): """Test the correct list of options are available in the .ini file.""" mock_parser = mock.MagicMock(spec=Parser) expected_argument_names = ( - 'rp_launch', - 'rp_launch_id', - 'rp_launch_description', - 'rp_project', - 'rp_log_level', - 'rp_log_format', - 'rp_rerun', - 'rp_rerun_of', - 'rp_parent_item_id', - 'rp_uuid', - 'rp_api_key', - 'rp_endpoint', - 'rp_mode', - 'rp_thread_logging', - 'rp_launch_uuid_print', - 'rp_launch_uuid_print_output', - 'rp_launch_attributes', - 'rp_tests_attributes', - 'rp_log_batch_size', - 'rp_log_batch_payload_size', - 'rp_ignore_attributes', - 'rp_is_skipped_an_issue', - 'rp_hierarchy_code', - 'rp_hierarchy_dirs_level', - 'rp_hierarchy_dirs', - 'rp_hierarchy_dir_path_separator', - 'rp_hierarchy_test_file', - 'rp_issue_system_url', - 'rp_bts_issue_url', - 'rp_bts_project', - 'rp_bts_url', - 'rp_verify_ssl', - 'rp_issue_id_marks', - 'retries', - 'rp_api_retries', - 'rp_skip_connection_test', - 'rp_launch_timeout', - 'rp_client_type', - 'rp_connect_timeout', - 'rp_read_timeout', - 'rp_report_fixtures' + "rp_launch", + "rp_launch_id", + "rp_launch_description", + "rp_project", + "rp_log_level", + "rp_log_format", + "rp_rerun", + "rp_rerun_of", + "rp_parent_item_id", + "rp_uuid", + "rp_api_key", + "rp_endpoint", + "rp_mode", + "rp_thread_logging", + "rp_launch_uuid_print", + "rp_launch_uuid_print_output", + "rp_launch_attributes", + "rp_tests_attributes", + "rp_log_batch_size", + "rp_log_batch_payload_size", + "rp_ignore_attributes", + "rp_is_skipped_an_issue", + "rp_hierarchy_code", + "rp_hierarchy_dirs_level", + "rp_hierarchy_dirs", + "rp_hierarchy_dir_path_separator", + "rp_hierarchy_test_file", + "rp_issue_system_url", + "rp_bts_issue_url", + "rp_bts_project", + "rp_bts_url", + "rp_verify_ssl", + "rp_issue_id_marks", + "retries", + "rp_api_retries", + "rp_skip_connection_test", + "rp_launch_timeout", + "rp_client_type", + "rp_connect_timeout", + "rp_read_timeout", + "rp_report_fixtures", ) pytest_addoption(mock_parser) @@ -383,23 +372,23 @@ def 
test_pytest_addoption_adds_correct_ini_file_arguments(): def test_pytest_addoption_adds_correct_command_line_arguments(): """Test the correct list of options are available in the command line.""" expected_argument_names = ( - '--reportportal', - '--rp-launch', - '--rp-launch-id', - '--rp-launch-description', - '--rp-project', - '--rp-log-level', - '--rp-log-format', - '--rp-rerun', - '--rp-rerun-of', - '--rp-parent-item-id', - '--rp-uuid', - '--rp-api-key', - '--rp-endpoint', - '--rp-mode', - '--rp-thread-logging', - '--rp-launch-uuid-print', - '--rp-launch-uuid-print-output' + "--reportportal", + "--rp-launch", + "--rp-launch-id", + "--rp-launch-description", + "--rp-project", + "--rp-log-level", + "--rp-log-format", + "--rp-rerun", + "--rp-rerun-of", + "--rp-parent-item-id", + "--rp-uuid", + "--rp-api-key", + "--rp-endpoint", + "--rp-mode", + "--rp-thread-logging", + "--rp-launch-uuid-print", + "--rp-launch-uuid-print-output", ) mock_parser = mock.MagicMock(spec=Parser) mock_reporting_group = mock_parser.getgroup.return_value diff --git a/tests/unit/test_service.py b/tests/unit/test_service.py index 8bfeb54..286a4c4 100644 --- a/tests/unit/test_service.py +++ b/tests/unit/test_service.py @@ -13,16 +13,16 @@ """This module includes unit tests for the service.py module.""" -from delayed_assert import expect, assert_expectations +from delayed_assert import assert_expectations, expect def test_get_item_parameters(mocked_item, rp_service): """Test that parameters are returned in a way supported by the client.""" - mocked_item.callspec.params = {'param': 'param_value'} + mocked_item.callspec.params = {"param": "param_value"} - expect(rp_service._get_parameters(mocked_item) == {'param': 'param_value'}) + expect(rp_service._get_parameters(mocked_item) == {"param": "param_value"}) - delattr(mocked_item, 'callspec') + delattr(mocked_item, "callspec") expect(rp_service._get_parameters(mocked_item) is None) assert_expectations() From c39e9532d42d8fa71b338de16861e1948030e896 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 14:55:48 +0300 Subject: [PATCH 016/110] Fix typing --- pytest_reportportal/service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 6dc5605..1fa88de 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -537,7 +537,7 @@ def _get_issue_description_line(self, mark, default_url): issues += template.format(issue_id=issue_id, url=issue_url) return ISSUE_DESCRIPTION_LINE_TEMPLATE.format(reason, issues) - def _get_issue(self, mark) -> Issue: + def _get_issue(self, mark) -> Optional[Issue]: """Add issues description and issue_type to the test item. :param mark: pytest mark @@ -620,7 +620,7 @@ def _process_test_case_id(self, leaf): return self._get_test_case_id(tc_ids[0], leaf) return self._get_test_case_id(None, leaf) - def _process_issue(self, item) -> Issue: + def _process_issue(self, item) -> Optional[Issue]: """ Process Issue if set. 
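A note on the `Optional[Issue]` fix in PATCH 016 above: both `_get_issue` and `_process_issue` fall through without a value when a test item carries no issue marks, so the earlier bare `Issue` return annotation misled type checkers. A minimal standalone sketch of the pattern, not part of the patch, using a simplified `Issue` stand-in rather than the real `reportportal_client` class:

```python
from typing import List, Optional


class Issue:
    """Simplified stand-in for reportportal_client's Issue type (illustrative only)."""

    def __init__(self, issue_type: str) -> None:
        self.issue_type = issue_type


def process_issue(marks: List[str]) -> Optional[Issue]:
    """Return an Issue for the first issue-like mark, or None when there is none."""
    for mark in marks:
        if mark.startswith("issue"):
            return Issue("PRODUCT_BUG")
    # The fall-through None is exactly why the return type must be
    # Optional[Issue], not Issue.
    return None
```

With the bare `Issue` annotation, a type checker would let callers dereference the result (e.g. `result.issue_type`) without a None check; `Optional[Issue]` forces that guard at every call site.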
From d8e8f5ecbcbb4b518afdee4cff2ea127906c5ad5 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 15:04:31 +0300 Subject: [PATCH 017/110] Fix test --- tests/integration/test_fixtures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index c51dc70..d68c320 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -415,7 +415,7 @@ def test_failure_fixture_teardown(mock_client_init): log_call_kwargs = log_call_args_list[1][1] assert log_call_kwargs["message"].endswith( - "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py:28: AssertionError" + "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py:29: AssertionError" ) assert log_call_kwargs["item_id"] == ( "examples/fixtures/test_failure_fixture_teardown/test_failure_fixture_teardown.py::" From 6a4acc83324e4efa60c35ada42884062c8649bc1 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 15:57:19 +0300 Subject: [PATCH 018/110] Disable pydocstyle on examples --- .pre-commit-config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 071dfb1..a110d32 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,8 @@ repos: - id: pydocstyle exclude: | (?x)^( - tests/.* + tests/.* | + examples/.* ) - repo: https://github.com/psf/black rev: 24.10.0 From 303ab98a7258a13c913a468615dc3d30ec9ef2dc Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 16:02:24 +0300 Subject: [PATCH 019/110] Client version update --- CHANGELOG.md | 2 ++ requirements.txt | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2b6ac3..675bbc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ ## [5.4.7] ### Added - Issue [#382](https://github.com/reportportal/agent-python-pytest/issues/382): Escaping of binary symbol '\0' in parameters, by @HardNorth +### Changed +- Client version updated on [5.6.0](https://github.com/reportportal/client-Python/releases/tag/5.6.0), by @HardNorth ## [5.4.6] ### Added diff --git a/requirements.txt b/requirements.txt index 616a738..5b6b3a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ dill>=0.3.6 pytest>=3.8.0 -reportportal-client~=5.5.10 +reportportal-client~=5.6.0 aenum>=3.1.0 From f1322568117737ca58e5c48b432e4b841a4e05eb Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 16:08:26 +0300 Subject: [PATCH 020/110] Fix black config --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a110d32..3ef9df4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: rev: 24.10.0 hooks: - id: black - args: [ '--check', 'robotframework_reportportal', 'tests' ] + args: [ '--check', 'pytest_reportportal', 'tests' ] - repo: https://github.com/pycqa/isort rev: 5.13.2 hooks: From 190dbc274db1989033fa0831f4a6e3101f048dfe Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 16:17:18 +0300 Subject: [PATCH 021/110] Bump required version of pytest --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5b6b3a0..c488176 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ dill>=0.3.6 -pytest>=3.8.0 
+pytest>=4.6.10 reportportal-client~=5.6.0 aenum>=3.1.0 From f3caa44d70acf65bf360374f68646da948f619b3 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 7 Feb 2025 17:23:12 +0300 Subject: [PATCH 022/110] Add pytest_bdd example --- examples/bdd/features/arguments.feature | 8 ++++++ examples/bdd/step_defs/__init__.py | 13 ++++++++++ examples/bdd/step_defs/test_arguments.py | 33 ++++++++++++++++++++++++ 3 files changed, 54 insertions(+) create mode 100644 examples/bdd/features/arguments.feature create mode 100644 examples/bdd/step_defs/__init__.py create mode 100644 examples/bdd/step_defs/test_arguments.py diff --git a/examples/bdd/features/arguments.feature b/examples/bdd/features/arguments.feature new file mode 100644 index 0000000..31ff061 --- /dev/null +++ b/examples/bdd/features/arguments.feature @@ -0,0 +1,8 @@ +Feature: Step arguments + Scenario: Arguments for given, when, then + Given there are 5 cucumbers + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/step_defs/__init__.py b/examples/bdd/step_defs/__init__.py new file mode 100644 index 0000000..8175387 --- /dev/null +++ b/examples/bdd/step_defs/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/examples/bdd/step_defs/test_arguments.py b/examples/bdd/step_defs/test_arguments.py new file mode 100644 index 0000000..a79392d --- /dev/null +++ b/examples/bdd/step_defs/test_arguments.py @@ -0,0 +1,33 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import scenarios, given, when, then, parsers + + +scenarios("../features/arguments.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left From 36fa65c3bf7a17e032ab4e233f783a462c8c91e6 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Mon, 10 Feb 2025 12:49:23 +0300 Subject: [PATCH 023/110] pytest_bdd: WIP --- pytest_reportportal/plugin.py | 56 ++++++++++++++++++++++++++++++++++- tests/integration/test_bdd.py | 29 ++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_bdd.py diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index c36c2f7..bb9a3f0 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -37,10 +37,12 @@ try: # noinspection PyPackageRequirements - from pytest_bdd import given # noqa: F401 + from pytest_bdd.parser import Feature, Scenario PYTEST_BDD = True except ImportError: + Feature = type("dummy", (), {}) + Scenario = type("dummy", (), {}) PYTEST_BDD = False log: Logger = logging.getLogger(__name__) @@ -368,6 +370,58 @@ def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[None, Any, N ) +if PYTEST_BDD: + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: + """Report BDD scenario start. + + :param request: represents fixture execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + """ + config = request.config + if not config._rp_enabled: + yield + return + + yield + # service = config.py_test_service + # agent_config = config._reporter_config + # service.start_bdd_scenario(scenario, feature) + # log_level = agent_config.rp_log_level or logging.NOTSET + # log_handler = RPLogHandler( + # level=log_level, + # filter_client_logs=True, + # endpoint=agent_config.rp_endpoint, + # ignored_record_names=("reportportal_client", "pytest_reportportal"), + # ) + # log_format = agent_config.rp_log_format + # if log_format: + # log_handler.setFormatter(logging.Formatter(log_format)) + # with patching_logger_class(): + # with _pytest.logging.catching_logs(log_handler, level=log_level): + # yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: + """Report BDD scenario finish. + + :param request: represents fixture execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + """ + config = request.config + if not config._rp_enabled: + yield + return + + yield + # service = config.py_test_service + # service.finish_bdd_scenario(scenario) + # yield + + # no types for backward compatibility for older pytest versions def pytest_addoption(parser) -> None: """Add support for the RP-related options. 
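A note on the `yield` followed by `return` in the BDD hook stubs added to plugin.py above: pluggy requires a `hookwrapper=True` implementation to be a generator that yields exactly once, even when it has nothing to do, otherwise pluggy raises an error when unwinding the wrapper. A standalone sketch of that guard, separate from the patch itself (`_rp_enabled` mirrors the plugin's internal flag; the `pytest_bdd_before_scenario(request, feature, scenario)` signature is pytest-bdd's own hook):

```python
import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_bdd_before_scenario(request, feature, scenario):
    """Guarded hookwrapper: always yields exactly once."""
    if not getattr(request.config, "_rp_enabled", False):
        # Reporting disabled: still hand control to the wrapped hook
        # implementations, then bail out without touching the service.
        yield
        return
    # Reporting enabled: start/finish reporting calls would bracket
    # this single yield.
    yield
```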
diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py new file mode 100644 index 0000000..c7071c6 --- /dev/null +++ b/tests/integration/test_bdd.py @@ -0,0 +1,29 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import mock + +from tests import REPORT_PORTAL_SERVICE +from tests.helpers import utils + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd(mock_client_init): + variables = {} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + mock_client = mock_client_init.return_value + assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' From 067286c25de9815499dade0d1e3359f1f10e7001 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Mon, 10 Feb 2025 18:04:01 +0300 Subject: [PATCH 024/110] pytest_bdd: WIP --- examples/bdd/features/arguments2.feature | 7 ++++ .../step_defs/test_arguments_two_features.py | 33 +++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 examples/bdd/features/arguments2.feature create mode 100644 examples/bdd/step_defs/test_arguments_two_features.py diff --git a/examples/bdd/features/arguments2.feature b/examples/bdd/features/arguments2.feature new file mode 100644 index 0000000..b010568 --- /dev/null +++ b/examples/bdd/features/arguments2.feature @@ -0,0 +1,7 @@ +Feature: Less step arguments + Scenario: Arguments for given, when, then, less steps + Given there are 5 cucumbers + + When I eat 5 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/step_defs/test_arguments_two_features.py b/examples/bdd/step_defs/test_arguments_two_features.py new file mode 100644 index 0000000..3a392dc --- /dev/null +++ b/examples/bdd/step_defs/test_arguments_two_features.py @@ -0,0 +1,33 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import scenarios, given, when, then, parsers + + +scenarios("../features/arguments.feature", "../features/arguments2.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left From ccecf83a8fe384a6245a4bcc3649f17b48d330c3 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 14:42:14 +0300 Subject: [PATCH 025/110] Backward compatibility fix --- pytest_reportportal/rp_logging.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytest_reportportal/rp_logging.py b/pytest_reportportal/rp_logging.py index d6ce9a0..8bc7e6e 100644 --- a/pytest_reportportal/rp_logging.py +++ b/pytest_reportportal/rp_logging.py @@ -18,7 +18,7 @@ import threading from contextlib import contextmanager from functools import wraps -from typing import Any +from typing import Any, List, Dict from reportportal_client import RPLogger, current, set_current from reportportal_client.core.worker import APIWorker @@ -114,7 +114,7 @@ def patching_logger_class(): def wrap_log(original_func): @wraps(original_func) - def _log(self, *args: list[Any], **kwargs: dict[str, Any]): + def _log(self, *args: List[Any], **kwargs: Dict[str, Any]): my_kwargs = kwargs.copy() attachment = my_kwargs.pop("attachment", None) if attachment is not None: From 8ac2b148dcb6e5a8a0915367e90d2aed33eec075 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 16:15:14 +0300 Subject: [PATCH 026/110] Update BDD examples --- ...s.feature => arguments_four_steps.feature} | 4 +-- ....feature => arguments_three_steps.feature} | 2 +- .../features/arguments_two_scenarios.feature | 15 +++++++++ examples/bdd/step_defs/test_arguments.py | 2 +- .../step_defs/test_arguments_two_features.py | 2 +- .../step_defs/test_arguments_two_scenarios.py | 33 +++++++++++++++++++ 6 files changed, 53 insertions(+), 5 deletions(-) rename examples/bdd/features/{arguments.feature => arguments_four_steps.feature} (63%) rename examples/bdd/features/{arguments2.feature => arguments_three_steps.feature} (84%) create mode 100644 examples/bdd/features/arguments_two_scenarios.feature create mode 100644 examples/bdd/step_defs/test_arguments_two_scenarios.py diff --git a/examples/bdd/features/arguments.feature b/examples/bdd/features/arguments_four_steps.feature similarity index 63% rename from examples/bdd/features/arguments.feature rename to examples/bdd/features/arguments_four_steps.feature index 31ff061..a71ab7c 100644 --- a/examples/bdd/features/arguments.feature +++ b/examples/bdd/features/arguments_four_steps.feature @@ -1,5 +1,5 @@ -Feature: Step arguments - Scenario: Arguments for given, when, then +Feature: Four step arguments + Scenario: Arguments for given, when, and, then Given there are 5 cucumbers When I eat 3 cucumbers diff --git a/examples/bdd/features/arguments2.feature b/examples/bdd/features/arguments_three_steps.feature similarity index 84% rename from examples/bdd/features/arguments2.feature rename to examples/bdd/features/arguments_three_steps.feature index b010568..da740d7 100644 --- a/examples/bdd/features/arguments2.feature +++ b/examples/bdd/features/arguments_three_steps.feature @@ -1,4 +1,4 @@ -Feature: Less 
step arguments +Feature: Three step arguments Scenario: Arguments for given, when, then, less steps Given there are 5 cucumbers diff --git a/examples/bdd/features/arguments_two_scenarios.feature b/examples/bdd/features/arguments_two_scenarios.feature new file mode 100644 index 0000000..63d0221 --- /dev/null +++ b/examples/bdd/features/arguments_two_scenarios.feature @@ -0,0 +1,15 @@ +Feature: Two scenarios step arguments + Scenario: Arguments for given, when, then + Given there are 5 cucumbers + + When I eat 5 cucumbers + + Then I should have 0 cucumbers + + Scenario: Arguments for given, when, and, then + Given there are 5 cucumbers + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/examples/bdd/step_defs/test_arguments.py b/examples/bdd/step_defs/test_arguments.py index a79392d..55afa4b 100644 --- a/examples/bdd/step_defs/test_arguments.py +++ b/examples/bdd/step_defs/test_arguments.py @@ -15,7 +15,7 @@ from pytest_bdd import scenarios, given, when, then, parsers -scenarios("../features/arguments.feature") +scenarios("../features/arguments_four_steps.feature") @given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") diff --git a/examples/bdd/step_defs/test_arguments_two_features.py b/examples/bdd/step_defs/test_arguments_two_features.py index 3a392dc..c6a0738 100644 --- a/examples/bdd/step_defs/test_arguments_two_features.py +++ b/examples/bdd/step_defs/test_arguments_two_features.py @@ -15,7 +15,7 @@ from pytest_bdd import scenarios, given, when, then, parsers -scenarios("../features/arguments.feature", "../features/arguments2.feature") +scenarios("../features/arguments_four_steps.feature", "../features/arguments_three_steps.feature") @given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") diff --git a/examples/bdd/step_defs/test_arguments_two_scenarios.py b/examples/bdd/step_defs/test_arguments_two_scenarios.py new file mode 100644 index 0000000..c44f2fb --- /dev/null +++ b/examples/bdd/step_defs/test_arguments_two_scenarios.py @@ -0,0 +1,33 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import scenarios, given, when, then, parsers + + +scenarios("../features/arguments_two_scenarios.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left From 2654b97a51ab9912ef74669c800a187cade89b26 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 16:24:09 +0300 Subject: [PATCH 027/110] Pytest BDD implementation: add item reporting skip --- examples/bdd/step_defs/test_arguments.py | 3 +-- .../step_defs/test_arguments_two_features.py | 3 +-- .../step_defs/test_arguments_two_scenarios.py | 3 +-- pyproject.toml | 2 ++ pytest_reportportal/plugin.py | 23 ++++--------------- pytest_reportportal/rp_logging.py | 2 +- pytest_reportportal/service.py | 11 +++++++++ 7 files changed, 21 insertions(+), 26 deletions(-) diff --git a/examples/bdd/step_defs/test_arguments.py b/examples/bdd/step_defs/test_arguments.py index 55afa4b..97e27fb 100644 --- a/examples/bdd/step_defs/test_arguments.py +++ b/examples/bdd/step_defs/test_arguments.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytest_bdd import scenarios, given, when, then, parsers - +from pytest_bdd import given, parsers, scenarios, then, when scenarios("../features/arguments_four_steps.feature") diff --git a/examples/bdd/step_defs/test_arguments_two_features.py b/examples/bdd/step_defs/test_arguments_two_features.py index c6a0738..0f268c5 100644 --- a/examples/bdd/step_defs/test_arguments_two_features.py +++ b/examples/bdd/step_defs/test_arguments_two_features.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pytest_bdd import scenarios, given, when, then, parsers - +from pytest_bdd import given, parsers, scenarios, then, when scenarios("../features/arguments_four_steps.feature", "../features/arguments_three_steps.feature") diff --git a/examples/bdd/step_defs/test_arguments_two_scenarios.py b/examples/bdd/step_defs/test_arguments_two_scenarios.py index c44f2fb..7e7db79 100644 --- a/examples/bdd/step_defs/test_arguments_two_scenarios.py +++ b/examples/bdd/step_defs/test_arguments_two_scenarios.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pytest_bdd import scenarios, given, when, then, parsers - +from pytest_bdd import given, parsers, scenarios, then, when scenarios("../features/arguments_two_scenarios.feature") diff --git a/pyproject.toml b/pyproject.toml index b9bf7b3..a81c860 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,8 +8,10 @@ requires = [ build-backend = "setuptools.build_meta" [tool.isort] +py_version=310 line_length = 119 profile = "black" +skip_gitignore = true [tool.black] line-length = 119 diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index bb9a3f0..2e9700e 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -274,6 +274,10 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: yield return + if item.location[0].endswith("/pytest_bdd/scenario.py"): + yield + return + service = config.py_test_service agent_config = config._reporter_config service.start_pytest_item(item) @@ -386,22 +390,6 @@ def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) -> return yield - # service = config.py_test_service - # agent_config = config._reporter_config - # service.start_bdd_scenario(scenario, feature) - # log_level = agent_config.rp_log_level or logging.NOTSET - # log_handler = RPLogHandler( - # level=log_level, - # filter_client_logs=True, - # endpoint=agent_config.rp_endpoint, - # ignored_record_names=("reportportal_client", "pytest_reportportal"), - # ) - # log_format = agent_config.rp_log_format - # if log_format: - # log_handler.setFormatter(logging.Formatter(log_format)) - # with patching_logger_class(): - # with _pytest.logging.catching_logs(log_handler, level=log_level): - # yield @pytest.hookimpl(hookwrapper=True) def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: @@ -417,9 +405,6 @@ def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> return yield - # service = config.py_test_service - # service.finish_bdd_scenario(scenario) - # yield # no types for backward compatibility for older pytest versions diff --git a/pytest_reportportal/rp_logging.py b/pytest_reportportal/rp_logging.py index 8bc7e6e..a53ce81 100644 --- a/pytest_reportportal/rp_logging.py +++ b/pytest_reportportal/rp_logging.py @@ -18,7 +18,7 @@ import threading from contextlib import contextmanager from functools import wraps -from typing import Any, List, Dict +from typing import Any, Dict, List from reportportal_client import RPLogger, current, set_current from reportportal_client.core.worker import APIWorker diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 1fa88de..5a77697 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -43,6 +43,17 @@ except ImportError: # in pytest < 8.0 there is no such type Dir = type("dummy", (), {}) + +try: + # noinspection PyPackageRequirements + from pytest_bdd.parser import Feature, Scenario + + PYTEST_BDD = True +except ImportError: + Feature = type("dummy", (), {}) + Scenario = type("dummy", (), {}) + PYTEST_BDD = False + from reportportal_client import RP, create_client from reportportal_client.helpers import dict_to_payload, gen_attributes, get_launch_sys_attrs, get_package_version From 1b557f94475bb0f5f23f1213493484ca078871b4 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 16:26:57 +0300 Subject: [PATCH 028/110] Pytest BDD implementation: fix test --- tests/integration/test_bdd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index c7071c6..8e437a0 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -26,4 +26,4 @@ def test_bdd(mock_client_init): assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value - assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' + assert mock_client.start_test_item.call_count == 0, '"start_test_item" should not be called for BDD tests' From c056d76dc1c864363a71db8bd94228ee55332d85 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 16:43:58 +0300 Subject: [PATCH 029/110] Pytest BDD implementation: WIP --- pytest_reportportal/plugin.py | 62 ++++++++++++++++++++++++++++++++-- pytest_reportportal/service.py | 3 +- 2 files changed, 61 insertions(+), 4 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index 2e9700e..c4fb009 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -17,7 +17,7 @@ import os.path import time from logging import Logger -from typing import Any, Generator +from typing import Any, Callable, Dict, Generator import _pytest.logging import dill as pickle @@ -37,12 +37,13 @@ try: # noinspection PyPackageRequirements - from pytest_bdd.parser import Feature, Scenario + from pytest_bdd.parser import Feature, Scenario, Step PYTEST_BDD = True except ImportError: Feature = type("dummy", (), {}) Scenario = type("dummy", (), {}) + Step = type("dummy", (), {}) PYTEST_BDD = False log: Logger = logging.getLogger(__name__) @@ -274,7 +275,7 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: yield return - if item.location[0].endswith("/pytest_bdd/scenario.py"): + if PYTEST_BDD and item.location[0].endswith("/pytest_bdd/scenario.py"): yield return @@ -406,6 +407,61 @@ def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> yield + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_before_step( + request, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any] + ) -> Generator[None, Any, None]: + config = request.config + if not config._rp_enabled: + yield + return + + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_after_step( + request, + feature: Feature, + scenario: Scenario, + step: Step, + step_func: Callable[..., Any], + step_func_args: Dict[str, Any], + ) -> Generator[None, Any, None]: + config = request.config + if not config._rp_enabled: + yield + return + + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_step_error( + request, + feature: Feature, + scenario: Scenario, + step: Step, + step_func: Callable[..., Any], + step_func_args: Dict[str, Any], + exception, + ) -> Generator[None, Any, None]: + config = request.config + if not config._rp_enabled: + yield + return + + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_bdd_step_func_lookup_error( + request, feature: Feature, scenario: Scenario, step: Step, exception + ) -> Generator[None, Any, None]: + config = request.config + if not config._rp_enabled: + yield + return + + yield + # no types for backward compatibility for older pytest versions def pytest_addoption(parser) -> None: diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 5a77697..34fd069 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -46,12 +46,13 @@ try: # noinspection PyPackageRequirements - from 
pytest_bdd.parser import Feature, Scenario + from pytest_bdd.parser import Feature, Scenario, Step PYTEST_BDD = True except ImportError: Feature = type("dummy", (), {}) Scenario = type("dummy", (), {}) + Step = type("dummy", (), {}) PYTEST_BDD = False from reportportal_client import RP, create_client From 93460aaaa42242bb44b8631b40b6493a196b93ae Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 16:55:47 +0300 Subject: [PATCH 030/110] Pytest BDD implementation: WIP --- pytest_reportportal/plugin.py | 39 +++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index c4fb009..18c812a 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -381,7 +381,7 @@ def pytest_fixture_post_finalizer(fixturedef, request) -> Generator[None, Any, N def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: """Report BDD scenario start. - :param request: represents fixture execution metadata + :param request: represents item execution metadata :param feature: represents feature file :param scenario: represents scenario from feature file """ @@ -396,7 +396,7 @@ def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) -> def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> Generator[None, Any, None]: """Report BDD scenario finish. - :param request: represents fixture execution metadata + :param request: represents item execution metadata :param feature: represents feature file :param scenario: represents scenario from feature file """ @@ -411,6 +411,14 @@ def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> def pytest_bdd_before_step( request, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any] ) -> Generator[None, Any, None]: + """Report BDD step start. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param step_func: represents function for step + """ config = request.config if not config._rp_enabled: yield @@ -427,6 +435,15 @@ def pytest_bdd_after_step( step_func: Callable[..., Any], step_func_args: Dict[str, Any], ) -> Generator[None, Any, None]: + """Report BDD step finish. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param step_func: represents function for step + :param step_func_args: represents arguments for step function + """ config = request.config if not config._rp_enabled: yield @@ -444,6 +461,16 @@ def pytest_bdd_step_error( step_func_args: Dict[str, Any], exception, ) -> Generator[None, Any, None]: + """Report BDD step error. 
+ + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param step_func: represents function for step + :param step_func_args: represents arguments for step function + :param exception: represents exception + """ config = request.config if not config._rp_enabled: yield @@ -455,6 +482,14 @@ def pytest_bdd_step_error( def pytest_bdd_step_func_lookup_error( request, feature: Feature, scenario: Scenario, step: Step, exception ) -> Generator[None, Any, None]: + """Report BDD step lookup error. + + :param request: represents item execution metadata + :param feature: represents feature file + :param scenario: represents scenario from feature file + :param step: represents step from scenario + :param exception: represents exception + """ config = request.config if not config._rp_enabled: yield From 392f091a23808582dc5c6cf46c851eae9df3d73b Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 17:27:56 +0300 Subject: [PATCH 031/110] Rename PyTestServiceClass to PyTestService --- pytest_reportportal/plugin.py | 4 ++-- pytest_reportportal/service.py | 4 ++-- tests/unit/conftest.py | 4 ++-- tests/unit/test_plugin.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index 18c812a..b738aeb 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -33,7 +33,7 @@ from pytest_reportportal import LAUNCH_WAIT_TIMEOUT from pytest_reportportal.config import AgentConfig from pytest_reportportal.rp_logging import patching_logger_class, patching_thread_class -from pytest_reportportal.service import PyTestServiceClass +from pytest_reportportal.service import PyTestService try: # noinspection PyPackageRequirements @@ -237,7 +237,7 @@ def pytest_configure(config) -> None: config._reporter_config = agent_config if is_control(config): - config.py_test_service = PyTestServiceClass(agent_config) + config.py_test_service = PyTestService(agent_config) else: # noinspection PyUnresolvedReferences config.py_test_service = pickle.loads(config.workerinput["py_test_service"]) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 34fd069..5dd6d00 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -126,7 +126,7 @@ def check_rp_enabled(func): @wraps(func) def wrap(*args, **kwargs): - if args and isinstance(args[0], PyTestServiceClass): + if args and isinstance(args[0], PyTestService): if not args[0].rp: return return func(*args, **kwargs) @@ -134,7 +134,7 @@ def wrap(*args, **kwargs): return wrap -class PyTestServiceClass: +class PyTestService: """Pytest service class for reporting test results to the Report Portal.""" _config: AgentConfig diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index d7fd093..8c5e0c9 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -24,7 +24,7 @@ from reportportal_client import RPLogger from pytest_reportportal.config import AgentConfig -from pytest_reportportal.service import PyTestServiceClass +from pytest_reportportal.service import PyTestService from tests import REPORT_PORTAL_SERVICE ITEM_PATH = py.path.local("examples/test_simple.py") @@ -107,7 +107,7 @@ def mocked_item(mocked_session, mocked_module): @fixture() def rp_service(mocked_config): """Prepare instance of the PyTestServiceClass for testing.""" - service = 
PyTestServiceClass(AgentConfig(mocked_config)) + service = PyTestService(AgentConfig(mocked_config)) with mock.patch(REPORT_PORTAL_SERVICE + ".get_project_settings"): service.start() return service diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py index a2efa71..67394c8 100644 --- a/tests/unit/test_plugin.py +++ b/tests/unit/test_plugin.py @@ -35,7 +35,7 @@ pytest_sessionstart, wait_launch, ) -from pytest_reportportal.service import PyTestServiceClass +from pytest_reportportal.service import PyTestService def test_is_control(mocked_config): @@ -101,7 +101,7 @@ def test_pytest_configure(mocked_config): mocked_config.option.rp_project = None pytest_configure(mocked_config) expect(mocked_config._rp_enabled is True) - expect(lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass)) + expect(lambda: isinstance(mocked_config.py_test_service, PyTestService)) assert_expectations() mocked_config.getoption.assert_has_calls( [mock.call("--collect-only", default=False), mock.call("--setup-plan", default=False)] From 703b4a108adc75041f039a5c59587df3c536db90 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 17:28:35 +0300 Subject: [PATCH 032/110] Rename PyTestServiceClass to PyTestService --- tests/unit/conftest.py | 2 +- tests/unit/test_plugin.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 8c5e0c9..28cdb51 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -106,7 +106,7 @@ def mocked_item(mocked_session, mocked_module): @fixture() def rp_service(mocked_config): - """Prepare instance of the PyTestServiceClass for testing.""" + """Prepare instance of the PyTestService for testing.""" service = PyTestService(AgentConfig(mocked_config)) with mock.patch(REPORT_PORTAL_SERVICE + ".get_project_settings"): service.start() diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py index 67394c8..5907e54 100644 --- a/tests/unit/test_plugin.py +++ b/tests/unit/test_plugin.py @@ -74,7 +74,7 @@ def test_logger_handle_no_attachment(mock_handler, logger, log_level): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.PyTestServiceClass") +@mock.patch("pytest_reportportal.plugin.PyTestService") def test_portal_on_maintenance(mocked_service_class, mocked_config, mocked_session): """Test session configuration if RP is in maintenance mode. From c2700bf484137cdd6578c09291106fd59aa14372 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 18:01:46 +0300 Subject: [PATCH 033/110] Do not start item if pytest-bdd detected --- pytest_reportportal/plugin.py | 30 +++++++++++++++--------------- tests/unit/test_plugin.py | 10 +++++----- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index b738aeb..b3ea300 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -46,7 +46,7 @@ Step = type("dummy", (), {}) PYTEST_BDD = False -log: Logger = logging.getLogger(__name__) +LOGGER: Logger = logging.getLogger(__name__) MANDATORY_PARAMETER_MISSED_PATTERN: str = ( "One of the following mandatory parameters is unset: " @@ -114,8 +114,8 @@ def pytest_sessionstart(session: Session) -> None: try: config.py_test_service.start() except ResponseError as response_error: - log.warning("Failed to initialize reportportal-client service. 
" "Reporting is disabled.") - log.debug(str(response_error)) + LOGGER.warning("Failed to initialize reportportal-client service. " "Reporting is disabled.") + LOGGER.debug(str(response_error)) config.py_test_service.rp = None config._rp_enabled = False return @@ -124,7 +124,7 @@ def pytest_sessionstart(session: Session) -> None: config.py_test_service.start_launch() if config.pluginmanager.hasplugin("xdist") or config.pluginmanager.hasplugin("pytest-parallel"): if not wait_launch(session.config.py_test_service.rp): - log.error(FAILED_LAUNCH_WAIT) + LOGGER.error(FAILED_LAUNCH_WAIT) config.py_test_service.rp = None config._rp_enabled = False @@ -196,8 +196,8 @@ def check_connection(agent_config: AgentConfig): resp.raise_for_status() return True except requests.exceptions.RequestException as exc: - log.exception(exc) - log.error("Unable to connect to Report Portal, the launch won't be reported") + LOGGER.exception(exc) + LOGGER.error("Unable to connect to Report Portal, the launch won't be reported") return False @@ -223,15 +223,15 @@ def pytest_configure(config) -> None: cond = (agent_config.rp_project, agent_config.rp_endpoint, agent_config.rp_api_key) config._rp_enabled = all(cond) if not config._rp_enabled: - log.debug(MANDATORY_PARAMETER_MISSED_PATTERN.format(*cond)) - log.debug("Disabling reporting to RP.") + LOGGER.debug(MANDATORY_PARAMETER_MISSED_PATTERN.format(*cond)) + LOGGER.debug("Disabling reporting to RP.") return if not agent_config.rp_skip_connection_test: config._rp_enabled = check_connection(agent_config) if not config._rp_enabled: - log.debug("Failed to establish connection with RP. " "Disabling reporting.") + LOGGER.debug("Failed to establish connection with RP. " "Disabling reporting.") return config._reporter_config = agent_config @@ -275,13 +275,11 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: yield return - if PYTEST_BDD and item.location[0].endswith("/pytest_bdd/scenario.py"): - yield - return - service = config.py_test_service agent_config = config._reporter_config - service.start_pytest_item(item) + if not PYTEST_BDD or not item.location[0].endswith("/pytest_bdd/scenario.py"): + service.start_pytest_item(item) + log_level = agent_config.rp_log_level or logging.NOTSET log_handler = RPLogHandler( level=log_level, @@ -295,7 +293,9 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: with patching_logger_class(): with _pytest.logging.catching_logs(log_handler, level=log_level): yield - service.finish_pytest_item(item) + + if not PYTEST_BDD or not item.location[0].endswith("/pytest_bdd/scenario.py"): + service.finish_pytest_item(item) # noinspection PyProtectedMember diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py index 5907e54..bf64450 100644 --- a/tests/unit/test_plugin.py +++ b/tests/unit/test_plugin.py @@ -27,7 +27,7 @@ FAILED_LAUNCH_WAIT, MANDATORY_PARAMETER_MISSED_PATTERN, is_control, - log, + LOGGER, pytest_addoption, pytest_collection_finish, pytest_configure, @@ -117,7 +117,7 @@ def test_pytest_configure_dry_run(mocked_config): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.log", wraps=log) +@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_endpoint. 
@@ -147,7 +147,7 @@ def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.log", wraps=log) +@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_project. @@ -177,7 +177,7 @@ def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.log", wraps=log) +@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) def test_pytest_configure_misssing_rp_uuid(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_uuid. @@ -265,7 +265,7 @@ def test_pytest_sessionstart(mocked_session): assert_expectations() -@mock.patch("pytest_reportportal.plugin.log", wraps=log) +@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) @mock.patch("pytest_reportportal.plugin.is_control", mock.Mock()) @mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock(return_value=False)) def test_pytest_sessionstart_launch_wait_fail(mocked_log, mocked_session): From 7bb0c12f841cd7d7655954e8650f831be197c178 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 22:55:44 +0300 Subject: [PATCH 034/110] Fix tests --- tests/unit/test_plugin.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py index bf64450..ffc02e3 100644 --- a/tests/unit/test_plugin.py +++ b/tests/unit/test_plugin.py @@ -117,7 +117,7 @@ def test_pytest_configure_dry_run(mocked_config): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_endpoint. @@ -147,7 +147,7 @@ def test_pytest_configure_misssing_rp_endpoint(mocked_log, mocked_config): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_project. @@ -177,7 +177,7 @@ def test_pytest_configure_misssing_rp_project(mocked_log, mocked_config): @mock.patch("pytest_reportportal.plugin.requests.get", mock.Mock()) -@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) def test_pytest_configure_misssing_rp_uuid(mocked_log, mocked_config): """Test plugin configuration in case of missing rp_uuid. 
@@ -265,7 +265,7 @@ def test_pytest_sessionstart(mocked_session): assert_expectations() -@mock.patch("pytest_reportportal.plugin.log", wraps=LOGGER) +@mock.patch("pytest_reportportal.plugin.LOGGER", wraps=LOGGER) @mock.patch("pytest_reportportal.plugin.is_control", mock.Mock()) @mock.patch("pytest_reportportal.plugin.wait_launch", mock.Mock(return_value=False)) def test_pytest_sessionstart_launch_wait_fail(mocked_log, mocked_session): From 78d353d67b0f360152a6db1f0e5496db620e0c46 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 22:57:49 +0300 Subject: [PATCH 035/110] Fix imports --- tests/unit/test_plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_plugin.py b/tests/unit/test_plugin.py index ffc02e3..9fc77a6 100644 --- a/tests/unit/test_plugin.py +++ b/tests/unit/test_plugin.py @@ -25,9 +25,9 @@ from pytest_reportportal.config import AgentConfig from pytest_reportportal.plugin import ( FAILED_LAUNCH_WAIT, + LOGGER, MANDATORY_PARAMETER_MISSED_PATTERN, is_control, - LOGGER, pytest_addoption, pytest_collection_finish, pytest_configure, From 86ddb5726d6958a35942c5cff63741da78d5561c Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 12 Feb 2025 23:10:50 +0300 Subject: [PATCH 036/110] Update bdd test --- tests/integration/test_bdd.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 8e437a0..d17aebe 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -26,4 +26,13 @@ def test_bdd(mock_client_init): assert int(result) == 0, "Exit code should be 0 (no errors)" mock_client = mock_client_init.return_value - assert mock_client.start_test_item.call_count == 0, '"start_test_item" should not be called for BDD tests' + assert mock_client.start_test_item.call_count == 6, 'There should be exactly six "start_test_item" calls' + assert mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count, \ + '"start_test_item" and "finish_test_item" should be called the same number of times' + + + # Check that scenarios and steps are reported correctly + scenario_calls = [call for call in mock_client.start_test_item.call_args_list if call[1]['item_type'] == 'SCENARIO'] + step_calls = [call for call in mock_client.start_test_item.call_args_list if call[1]['item_type'] == 'STEP'] + assert len(scenario_calls) == 1, "There should be exactly one Scenario reported" + assert len(step_calls) == 4, "There should be exactly four Steps reported" From 9bab6c5a6f932aa418193ed02000e0afa5b3d7e5 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 13:49:36 +0300 Subject: [PATCH 037/110] Pytest BDD implementation: WIP --- pytest_reportportal/plugin.py | 23 ++++++++++++++++++----- pytest_reportportal/service.py | 15 +++++++++++++++ 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index b3ea300..8c1b0cf 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -389,7 +389,8 @@ def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) -> if not config._rp_enabled: yield return - + service = config.py_test_service + service.start_bdd_scenario(feature, scenario) yield @pytest.hookimpl(hookwrapper=True) @@ -406,6 +407,8 @@ def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> return yield + service = config.py_test_service + 
service.finish_bdd_scenario(feature, scenario)
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_bdd_before_step(
@@ -424,6 +427,8 @@ def pytest_bdd_before_step(
         yield
         return
 
+    service = config.py_test_service
+    service.start_bdd_step(feature, scenario, step)
     yield
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_bdd_after_step(
@@ -450,6 +455,8 @@ def pytest_bdd_after_step(
         return
 
     yield
+    service = config.py_test_service
+    service.finish_bdd_step(feature, scenario, step)
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_bdd_step_error(
@@ -457,8 +464,8 @@ def pytest_bdd_step_error(
     feature: Feature,
     scenario: Scenario,
     step: Step,
-    step_func: Callable[..., Any],
-    step_func_args: Dict[str, Any],
+    _: Callable[..., Any],
+    __: Dict[str, Any],
     exception,
 ) -> Generator[None, Any, None]:
     """Report BDD step error.
@@ -467,8 +474,8 @@ def pytest_bdd_step_error(
     :param feature: represents feature file
     :param scenario: represents scenario from feature file
     :param step: represents step from scenario
-    :param step_func: represents function for step
-    :param step_func_args: represents arguments for step function
+    :param _: represents function for step
+    :param __: represents arguments for step function
     :param exception: represents exception
     """
     config = request.config
@@ -477,6 +484,8 @@ def pytest_bdd_step_error(
         return
 
     yield
+    service = config.py_test_service
+    service.report_bdd_step_error(feature, scenario, step, exception)
 
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_bdd_step_func_lookup_error(
@@ -495,7 +504,11 @@ def pytest_bdd_step_func_lookup_error(
         yield
         return
 
+    service = config.py_test_service
+    service.start_bdd_step(feature, scenario, step)
     yield
+    service.report_bdd_step_error(feature, scenario, step, exception)
+    service.finish_bdd_step(feature, scenario, step)
 
 
 # no types for backward compatibility for older pytest versions
diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py
index 5dd6d00..38cb297 100644
--- a/pytest_reportportal/service.py
+++ b/pytest_reportportal/service.py
@@ -932,6 +932,21 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None
             LOGGER.exception(e)
             reporter.finish_nested_step(item_id, timestamp(), "FAILED")
 
+    def start_bdd_scenario(self, feature: Feature, scenario: Scenario):
+        pass
+
+    def finish_bdd_scenario(self, feature: Feature, scenario: Scenario):
+        pass
+
+    def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step):
+        pass
+
+    def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step):
+        pass
+
+    def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception):
+        pass
+
     def start(self) -> None:
         """Start servicing Report Portal requests."""
         self.parent_item_id = self._config.rp_parent_item_id
From 751882884c002f523521e7f074a19c89a6babc5b Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Thu, 13 Feb 2025 15:01:43 +0300
Subject: [PATCH 038/110] Pytest BDD implementation: rollback argument rename
 and suppress some warnings

---
 pytest_reportportal/plugin.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py
index 8c1b0cf..c528334 100644
--- a/pytest_reportportal/plugin.py
+++ b/pytest_reportportal/plugin.py
@@ -386,6 +386,7 @@ def pytest_bdd_before_scenario(request, feature: Feature, scenario: Scenario) ->
     :param scenario: represents scenario from feature file
     """
     config = request.config
+    # noinspection PyProtectedMember
     if not config._rp_enabled:
         yield
         return
@@ -402,6
+403,7 @@ def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> :param scenario: represents scenario from feature file """ config = request.config + # noinspection PyProtectedMember if not config._rp_enabled: yield return @@ -410,6 +412,7 @@ def pytest_bdd_after_scenario(request, feature: Feature, scenario: Scenario) -> service = config.py_test_service service.finish_bdd_scenario(feature, scenario) + # noinspection PyUnusedLocal @pytest.hookimpl(hookwrapper=True) def pytest_bdd_before_step( request, feature: Feature, scenario: Scenario, step: Step, step_func: Callable[..., Any] @@ -423,6 +426,7 @@ def pytest_bdd_before_step( :param step_func: represents function for step """ config = request.config + # noinspection PyProtectedMember if not config._rp_enabled: yield return @@ -431,6 +435,7 @@ def pytest_bdd_before_step( service.start_bdd_step(feature, scenario, step) yield + # noinspection PyUnusedLocal @pytest.hookimpl(hookwrapper=True) def pytest_bdd_after_step( request, @@ -450,6 +455,7 @@ def pytest_bdd_after_step( :param step_func_args: represents arguments for step function """ config = request.config + # noinspection PyProtectedMember if not config._rp_enabled: yield return @@ -458,14 +464,15 @@ def pytest_bdd_after_step( service = config.py_test_service service.finish_bdd_step(feature, scenario, step) + # noinspection PyUnusedLocal @pytest.hookimpl(hookwrapper=True) def pytest_bdd_step_error( request, feature: Feature, scenario: Scenario, step: Step, - _: Callable[..., Any], - __: Dict[str, Any], + step_func: Callable[..., Any], + step_func_args: Dict[str, Any], exception, ) -> Generator[None, Any, None]: """Report BDD step error. @@ -474,11 +481,12 @@ def pytest_bdd_step_error( :param feature: represents feature file :param scenario: represents scenario from feature file :param step: represents step from scenario - :param _: represents function for step - :param __: represents arguments for step function + :param step_func: represents function for step + :param step_func_args: represents arguments for step function :param exception: represents exception """ config = request.config + # noinspection PyProtectedMember if not config._rp_enabled: yield return @@ -500,6 +508,7 @@ def pytest_bdd_step_func_lookup_error( :param exception: represents exception """ config = request.config + # noinspection PyProtectedMember if not config._rp_enabled: yield return From add7f61e8f7ad8f8cdbbc3ef75ddc6db619d307f Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 15:15:18 +0300 Subject: [PATCH 039/110] Pytest BDD implementation: add pydocs and enable checks --- pytest_reportportal/service.py | 51 ++++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 38cb297..99f0c6f 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -932,20 +932,55 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None LOGGER.exception(e) reporter.finish_nested_step(item_id, timestamp(), "FAILED") - def start_bdd_scenario(self, feature: Feature, scenario: Scenario): - pass + def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: + """Save BDD scenario and Feature to test tree. The scenario will be started later if a step will be reported. 
- def finish_bdd_scenario(self, feature: Feature, scenario: Scenario): + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + """ pass - def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step): - pass + def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: + """Finish BDD scenario. Skip if it was not started. - def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step): + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + """ pass - def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception): - pass + @check_rp_enabled + def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: + """Start BDD step. + + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + :param step: pytest_bdd.Step + """ + if not PYTEST_BDD: + return + + @check_rp_enabled + def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: + """Finish BDD step. + + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + :param step: pytest_bdd.Step + """ + if not PYTEST_BDD: + return + + @check_rp_enabled + def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception) -> None: + """Report BDD step error. + + :param feature: pytest_bdd.Feature + :param scenario: pytest_bdd.Scenario + :param step: pytest_bdd.Step + :param exception: Exception + """ + if not PYTEST_BDD: + return def start(self) -> None: """Start servicing Report Portal requests.""" From 4d59e19e7b48b10e5a37436d2845df13b6e74f6f Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 15:20:35 +0300 Subject: [PATCH 040/110] Pytest BDD implementation: fix test format --- tests/integration/test_bdd.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index d17aebe..15e88da 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -27,12 +27,14 @@ def test_bdd(mock_client_init): mock_client = mock_client_init.return_value assert mock_client.start_test_item.call_count == 6, 'There should be exactly six "start_test_item" calls' - assert mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count, \ - '"start_test_item" and "finish_test_item" should be called the same number of times' - + assert ( + mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count + ), '"start_test_item" and "finish_test_item" should be called the same number of times' # Check that scenarios and steps are reported correctly - scenario_calls = [call for call in mock_client.start_test_item.call_args_list if call[1]['item_type'] == 'SCENARIO'] - step_calls = [call for call in mock_client.start_test_item.call_args_list if call[1]['item_type'] == 'STEP'] + scenario_calls = [ + call for call in mock_client.start_test_item.call_args_list if call[1]["item_type"] == "SCENARIO" + ] + step_calls = [call for call in mock_client.start_test_item.call_args_list if call[1]["item_type"] == "STEP"] assert len(scenario_calls) == 1, "There should be exactly one Scenario reported" assert len(step_calls) == 4, "There should be exactly four Steps reported" From c8cc4a204e071b931278f73ada79b7ed1eee3020 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 15:25:10 +0300 Subject: [PATCH 041/110] Update isort to fix warnings --- 
.pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3ef9df4..8b2adf5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,7 +23,7 @@ repos: - id: black args: [ '--check', 'pytest_reportportal', 'tests' ] - repo: https://github.com/pycqa/isort - rev: 5.13.2 + rev: 6.0.0 hooks: - id: isort - repo: https://github.com/pycqa/flake8 From 5c0434cccbd915f7fe6664ad872671ff8d5fd15f Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 15:52:14 +0300 Subject: [PATCH 042/110] Add more types --- pytest_reportportal/service.py | 87 ++++++++++++++++++---------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 99f0c6f..e926844 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -342,7 +342,7 @@ def _generate_names(self, test_tree: Dict[str, Any]) -> None: for item, child_leaf in test_tree["children"].items(): self._generate_names(child_leaf) - def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separator: str): + def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separator: str) -> None: child_items = list(test_tree["children"].items()) if test_tree["type"] not in leaf_types: for item, child_leaf in child_items: @@ -405,7 +405,7 @@ def _truncate_item_name(self, name: str) -> str: ) return name - def _get_item_description(self, test_item): + def _get_item_description(self, test_item: Item) -> Optional[str]: """Get description of item. :param test_item: pytest.Item @@ -432,7 +432,7 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> return func(leaf) return func(leaf) - def _build_start_suite_rq(self, leaf): + def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: code_ref = str(leaf["item"]) if leaf["type"] == LeafType.DIR else str(leaf["item"].fspath) parent_item_id = self._lock(leaf["parent"], lambda p: p.get("item_id")) if "parent" in leaf else None payload = { @@ -445,11 +445,11 @@ def _build_start_suite_rq(self, leaf): } return payload - def _start_suite(self, suite_rq): + def _start_suite(self, suite_rq: Dict[str, Any]) -> Optional[str]: LOGGER.debug("ReportPortal - Start Suite: request_body=%s", suite_rq) return self.rp.start_test_item(**suite_rq) - def _create_suite(self, leaf): + def _create_suite(self, leaf: Dict[str, Any]) -> None: if leaf["exec"] != ExecStatus.CREATED: return item_id = self._start_suite(self._build_start_suite_rq(leaf)) @@ -457,7 +457,7 @@ def _create_suite(self, leaf): leaf["exec"] = ExecStatus.IN_PROGRESS @check_rp_enabled - def _create_suite_path(self, item: Item): + def _create_suite_path(self, item: Item) -> None: path = self._tree_path[item] for leaf in path[1:-1]: if leaf["exec"] != ExecStatus.CREATED: @@ -467,13 +467,15 @@ def _create_suite_path(self, item: Item): def _get_item_name(self, mark) -> Optional[str]: return mark.kwargs.get("name", mark.args[0] if mark.args else None) - def _get_code_ref(self, item): + def _get_code_ref(self, item: Item) -> str: # Generate script path from work dir, use only backslashes to have the # same path on different systems and do not affect Test Case ID on # different systems path = os.path.relpath(str(item.fspath), ROOT_DIR).replace("\\", "/") method_name = ( - item.originalname if hasattr(item, "originalname") and item.originalname is not None else item.name + item.originalname + if 
hasattr(item, "originalname") and getattr(item, "originalname") is not None + else item.name ) parent = item.parent classes = [method_name] @@ -488,13 +490,13 @@ def _get_code_ref(self, item): class_path = ".".join(classes) return "{0}:{1}".format(path, class_path) - def _get_test_case_id(self, mark, leaf) -> str: - parameters = leaf.get("parameters", None) + def _get_test_case_id(self, mark, leaf: Dict[str, Any]) -> str: + parameters: Optional[Dict[str, Any]] = leaf.get("parameters", None) parameterized = True - selected_params = None + selected_params: Optional[List[str]] = None if mark is not None: parameterized = mark.kwargs.get("parameterized", False) - selected_params = mark.kwargs.get("params", None) + selected_params: Optional[Union[str, List[str]]] = mark.kwargs.get("params", None) if selected_params is not None and not isinstance(selected_params, list): selected_params = [selected_params] @@ -620,7 +622,7 @@ def _get_parameters(self, item) -> Optional[Dict[str, Any]]: return None return {str(k): v.replace("\0", "\\0") if isinstance(v, str) else v for k, v in params.items()} - def _process_test_case_id(self, leaf): + def _process_test_case_id(self, leaf: Dict[str, Any]) -> str: """ Process Test Case ID if set. @@ -632,7 +634,7 @@ def _process_test_case_id(self, leaf): return self._get_test_case_id(tc_ids[0], leaf) return self._get_test_case_id(None, leaf) - def _process_issue(self, item) -> Optional[Issue]: + def _process_issue(self, item: Item) -> Optional[Issue]: """ Process Issue if set. @@ -643,7 +645,7 @@ def _process_issue(self, item) -> Optional[Issue]: if len(issues) > 0: return self._get_issue(issues[0]) - def _process_attributes(self, item): + def _process_attributes(self, item: Item) -> List[Dict[str, Any]]: """ Process attributes of item. @@ -706,14 +708,14 @@ def _build_start_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: } return payload - def _start_step(self, step_rq): + def _start_step(self, step_rq: Dict[str, Any]) -> Optional[str]: LOGGER.debug("ReportPortal - Start TestItem: request_body=%s", step_rq) return self.rp.start_test_item(**step_rq) - def __unique_id(self): + def __unique_id(self) -> str: return str(os.getpid()) + "-" + str(threading.current_thread().ident) - def __started(self): + def __started(self) -> bool: return self.__unique_id() in self._start_tracker @check_rp_enabled @@ -737,7 +739,7 @@ def start_pytest_item(self, test_item: Optional[Item] = None): current_leaf["item_id"] = item_id current_leaf["exec"] = ExecStatus.IN_PROGRESS - def process_results(self, test_item, report): + def process_results(self, test_item: Item, report): """ Save test item results after execution. 
@@ -760,7 +762,7 @@ def process_results(self, test_item, report): if leaf["status"] in (None, "PASSED"): leaf["status"] = "SKIPPED" - def _build_finish_step_rq(self, leaf): + def _build_finish_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: issue = leaf.get("issue", None) status = leaf["status"] if status == "SKIPPED" and not self._config.rp_is_skipped_an_issue: @@ -776,26 +778,26 @@ def _build_finish_step_rq(self, leaf): } return payload - def _finish_step(self, finish_rq): + def _finish_step(self, finish_rq: Dict[str, Any]) -> None: LOGGER.debug("ReportPortal - Finish TestItem: request_body=%s", finish_rq) self.rp.finish_test_item(**finish_rq) - def _finish_suite(self, finish_rq): + def _finish_suite(self, finish_rq: Dict[str, Any]) -> None: LOGGER.debug("ReportPortal - End TestSuite: request_body=%s", finish_rq) self.rp.finish_test_item(**finish_rq) - def _build_finish_suite_rq(self, leaf): + def _build_finish_suite_rq(self, leaf) -> Dict[str, Any]: payload = {"end_time": timestamp(), "item_id": leaf["item_id"]} return payload - def _proceed_suite_finish(self, leaf): + def _proceed_suite_finish(self, leaf) -> None: if leaf.get("exec", ExecStatus.FINISHED) == ExecStatus.FINISHED: return self._finish_suite(self._build_finish_suite_rq(leaf)) leaf["exec"] = ExecStatus.FINISHED - def _finish_parents(self, leaf): + def _finish_parents(self, leaf: Dict[str, Any]) -> None: if ( "parent" not in leaf or leaf["parent"] is None @@ -815,13 +817,15 @@ def _finish_parents(self, leaf): self._finish_parents(leaf["parent"]) @check_rp_enabled - def finish_pytest_item(self, test_item): - """ - Finish pytest_item. + def finish_pytest_item(self, test_item: Optional[Item] = None) -> None: + """Finish pytest_item. :param test_item: pytest.Item :return: None """ + if test_item is None: + return + path = self._tree_path[test_item] leaf = path[-1] self._process_metadata_item_finish(leaf) @@ -829,10 +833,10 @@ def finish_pytest_item(self, test_item): leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) - def _get_items(self, exec_status): + def _get_items(self, exec_status) -> List[Item]: return [k for k, v in self._tree_path.items() if v[-1]["exec"] == exec_status] - def finish_suites(self): + def finish_suites(self) -> None: """ Finish all suites in run with status calculations. @@ -855,25 +859,23 @@ def finish_suites(self): if leaf["exec"] == ExecStatus.IN_PROGRESS: self._lock(leaf, lambda p: self._proceed_suite_finish(p)) - def _build_finish_launch_rq(self): + def _build_finish_launch_rq(self) -> Dict[str, Any]: finish_rq = {"end_time": timestamp()} return finish_rq - def _finish_launch(self, finish_rq): + def _finish_launch(self, finish_rq) -> None: LOGGER.debug("ReportPortal - Finish launch: request_body=%s", finish_rq) self.rp.finish_launch(**finish_rq) @check_rp_enabled - def finish_launch(self): - """ - Finish tests launch. 
- - :return: None - """ + def finish_launch(self) -> None: + """Finish test launch.""" # To finish launch session str parameter is needed self._finish_launch(self._build_finish_launch_rq()) - def _build_log(self, item_id: str, message: str, log_level: str, attachment: Optional[Any] = None): + def _build_log( + self, item_id: str, message: str, log_level: str, attachment: Optional[Any] = None + ) -> Dict[str, Any]: sl_rq = { "item_id": item_id, "time": timestamp(), @@ -885,7 +887,7 @@ def _build_log(self, item_id: str, message: str, log_level: str, attachment: Opt return sl_rq @check_rp_enabled - def post_log(self, test_item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None): + def post_log(self, test_item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None) -> None: """ Send a log message to the Report Portal. @@ -938,7 +940,8 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: :param feature: pytest_bdd.Feature :param scenario: pytest_bdd.Scenario """ - pass + if not self.__started(): + self.start() def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Finish BDD scenario. Skip if it was not started. @@ -1016,7 +1019,7 @@ def start(self) -> None: # noinspection PyUnresolvedReferences self._start_tracker.add(self.__unique_id()) - def stop(self): + def stop(self) -> None: """Finish servicing Report Portal requests.""" self.rp.close() self.rp = None From eb3c75d87fc8808e8e048043247702a6dab5d86d Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 16:29:22 +0300 Subject: [PATCH 043/110] Pytest BDD implementation: revert Item report skip --- pytest_reportportal/plugin.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index c528334..1d616d4 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -277,8 +277,7 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: service = config.py_test_service agent_config = config._reporter_config - if not PYTEST_BDD or not item.location[0].endswith("/pytest_bdd/scenario.py"): - service.start_pytest_item(item) + service.start_pytest_item(item) log_level = agent_config.rp_log_level or logging.NOTSET log_handler = RPLogHandler( @@ -294,8 +293,7 @@ def pytest_runtest_protocol(item: Item) -> Generator[None, Any, None]: with _pytest.logging.catching_logs(log_handler, level=log_level): yield - if not PYTEST_BDD or not item.location[0].endswith("/pytest_bdd/scenario.py"): - service.finish_pytest_item(item) + service.finish_pytest_item(item) # noinspection PyProtectedMember From c840ed26da8e8e745f08771dcafe81daca43c3ce Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 18:08:15 +0300 Subject: [PATCH 044/110] Pytest BDD implementation: WIP --- pytest_reportportal/service.py | 66 ++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 11 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index e926844..79aad56 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -13,6 +13,7 @@ """This module includes Service functions for work with pytest agent.""" +import re import logging import os.path import sys @@ -20,7 +21,7 @@ from functools import wraps from os import curdir from time import sleep, time -from typing import Any, Callable, Dict, Generator, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Dict, 
Generator, List, Optional, Set, Union from _pytest.doctest import DoctestItem from aenum import Enum, auto, unique @@ -45,6 +46,9 @@ Dir = type("dummy", (), {}) try: + # noinspection PyPackageRequirements + from pytest_bdd.scenario import make_python_name + # noinspection PyPackageRequirements from pytest_bdd.parser import Feature, Scenario, Step @@ -53,6 +57,7 @@ Feature = type("dummy", (), {}) Scenario = type("dummy", (), {}) Step = type("dummy", (), {}) + make_python_name: Callable[[str], str] = lambda x: x PYTEST_BDD = False from reportportal_client import RP, create_client @@ -60,6 +65,7 @@ LOGGER = logging.getLogger(__name__) +KNOWN_LOG_LEVELS = ("TRACE", "DEBUG", "INFO", "WARN", "ERROR") MAX_ITEM_NAME_LENGTH: int = 1024 TRUNCATION_STR: str = "..." ROOT_DIR: str = str(os.path.abspath(curdir)) @@ -68,6 +74,8 @@ ISSUE_DESCRIPTION_LINE_TEMPLATE: str = "* {}:{}" ISSUE_DESCRIPTION_URL_TEMPLATE: str = " [{issue_id}]({url})" ISSUE_DESCRIPTION_ID_TEMPLATE: str = " {issue_id}" +PYTHON_REPLACE_REGEX = re.compile(r"\W") +ALPHA_REGEX = re.compile(r"^\d+_*") def trim_docstring(docstring: str) -> str: @@ -140,7 +148,8 @@ class PyTestService: _config: AgentConfig _issue_types: Dict[str, str] _tree_path: Dict[Item, List[Dict[str, Any]]] - _log_levels: Tuple[str, str, str, str, str] + _bdd_item_by_name: Dict[str, Item] + _bdd_item_by_scenario: Dict[Scenario, Item] _start_tracker: Set[str] _launch_id: Optional[str] agent_name: str @@ -155,7 +164,7 @@ def __init__(self, agent_config: AgentConfig) -> None: self._config = agent_config self._issue_types = {} self._tree_path = {} - self._log_levels = ("TRACE", "DEBUG", "INFO", "WARN", "ERROR") + self._bdd_item_by_name = {} self._start_tracker = set() self._launch_id = None self.agent_name = "pytest-reportportal" @@ -732,6 +741,10 @@ def start_pytest_item(self, test_item: Optional[Item] = None): if not self.__started(): self.start() + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + self._bdd_item_by_name[test_item.name] = test_item + return + self._create_suite_path(test_item) current_leaf = self._tree_path[test_item][-1] self._process_metadata_item_start(current_leaf) @@ -826,9 +839,13 @@ def finish_pytest_item(self, test_item: Optional[Item] = None) -> None: if test_item is None: return - path = self._tree_path[test_item] - leaf = path[-1] + leaf = self._tree_path[test_item][-1] self._process_metadata_item_finish(leaf) + + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + del self._bdd_item_by_name[test_item.name] + return + self._finish_step(self._build_finish_step_rq(leaf)) leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) @@ -887,7 +904,7 @@ def _build_log( return sl_rq @check_rp_enabled - def post_log(self, test_item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None) -> None: + def post_log(self, test_item: Item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None) -> None: """ Send a log message to the Report Portal. @@ -898,9 +915,9 @@ def post_log(self, test_item, message: str, log_level: str = "INFO", attachment: :param attachment: attachment file :return: None """ - if log_level not in self._log_levels: + if log_level not in KNOWN_LOG_LEVELS: LOGGER.warning( - "Incorrect loglevel = %s. Force set to INFO. " "Available levels: %s.", log_level, self._log_levels + "Incorrect loglevel = %s. Force set to INFO. 
" "Available levels: %s.", log_level, KNOWN_LOG_LEVELS ) item_id = self._tree_path[test_item][-1]["item_id"] @@ -934,14 +951,30 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None LOGGER.exception(e) reporter.finish_nested_step(item_id, timestamp(), "FAILED") + def _get_python_name(self, name: str) -> str: + python_name = f"test_{make_python_name(name)}" + same_scenario_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] + if len(same_scenario_names) < 1: + return python_name + elif len(same_scenario_names) == 1: + return same_scenario_names[0] + else: + indexes = sorted([int(name.split("_")[-1]) for name in same_scenario_names]) + return f"{python_name}_{indexes[-1]}" + def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Save BDD scenario and Feature to test tree. The scenario will be started later if a step will be reported. :param feature: pytest_bdd.Feature :param scenario: pytest_bdd.Scenario """ - if not self.__started(): - self.start() + if not PYTEST_BDD: + return + item_name = self._get_python_name(scenario.name) + test_item = self._bdd_item_by_name.get(item_name, None) + if test_item is None: + return + self._bdd_item_by_scenario[scenario] = test_item def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Finish BDD scenario. Skip if it was not started. @@ -949,7 +982,14 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: :param feature: pytest_bdd.Feature :param scenario: pytest_bdd.Scenario """ - pass + if not PYTEST_BDD: + return + + test_item = self._bdd_item_by_scenario.pop(scenario) + leaf = self._tree_path[test_item][-1] + self._finish_step(self._build_finish_step_rq(leaf)) + leaf["exec"] = ExecStatus.FINISHED + self._finish_parents(leaf) @check_rp_enabled def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: @@ -984,6 +1024,10 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step """ if not PYTEST_BDD: return + test_item = self._bdd_item_by_scenario[scenario] + self.post_log(test_item, str(exception), log_level="ERROR") + leaf = self._tree_path[test_item][-1] + leaf["status"] = "FAILED" def start(self) -> None: """Start servicing Report Portal requests.""" From 9098c3f24676c422b92168ad3d34b2f2e87ecec2 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 13 Feb 2025 18:09:37 +0300 Subject: [PATCH 045/110] Pytest BDD implementation: WIP --- pytest_reportportal/service.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 79aad56..17c4cb2 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -13,9 +13,9 @@ """This module includes Service functions for work with pytest agent.""" -import re import logging import os.path +import re import sys import threading from functools import wraps @@ -47,10 +47,9 @@ try: # noinspection PyPackageRequirements - from pytest_bdd.scenario import make_python_name - # noinspection PyPackageRequirements from pytest_bdd.parser import Feature, Scenario, Step + from pytest_bdd.scenario import make_python_name PYTEST_BDD = True except ImportError: @@ -904,7 +903,9 @@ def _build_log( return sl_rq @check_rp_enabled - def post_log(self, test_item: Item, message: str, log_level: str = "INFO", attachment: Optional[Any] = None) -> None: + def post_log( + self, test_item: Item, message: str, log_level: str = "INFO", 
attachment: Optional[Any] = None + ) -> None: """ Send a log message to the Report Portal. From 3700cc92fc3d880eff9c64510acce045dd167b6d Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 14 Feb 2025 12:50:47 +0300 Subject: [PATCH 046/110] Fix fixture failure logging --- pytest_reportportal/service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 17c4cb2..f0b3507 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -945,7 +945,8 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None if exception: if type(exception).__name__ != "Skipped": status = "FAILED" - self.post_log(name, error_msg, log_level="ERROR") + exception_log = self._build_log(item_id, error_msg, log_level="ERROR") + self.rp.log(**exception_log) reporter.finish_nested_step(item_id, timestamp(), status) except Exception as e: LOGGER.error("Failed to report fixture: %s", name) From 50a949895ef45c2e7afd4d55da23caa9f5f9026c Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 14 Feb 2025 13:27:01 +0300 Subject: [PATCH 047/110] Fix fixture failure logging --- pytest_reportportal/service.py | 6 +++++- tests/integration/test_fixtures.py | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index f0b3507..9f75965 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -18,6 +18,7 @@ import re import sys import threading +import traceback from functools import wraps from os import curdir from time import sleep, time @@ -945,7 +946,10 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None if exception: if type(exception).__name__ != "Skipped": status = "FAILED" - exception_log = self._build_log(item_id, error_msg, log_level="ERROR") + error_log = self._build_log(item_id, error_msg, log_level="ERROR") + self.rp.log(**error_log) + traceback_str = "\n".join(traceback.format_exception(exception)) + exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") self.rp.log(**exception_log) reporter.finish_nested_step(item_id, timestamp(), status) except Exception as e: diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index d68c320..85184bd 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -213,7 +213,7 @@ def test_fixture_setup_failure(mock_client_init): assert not setup_call_kwargs["has_stats"] log_count = mock_client.log.call_count - assert log_count == 2, 'Incorrect number of "log" calls' + assert log_count == 4, 'Incorrect number of "log" calls' log_call_args_list = mock_client.log.call_args_list log_call_args = log_call_args_list[0][0] @@ -222,7 +222,7 @@ def test_fixture_setup_failure(mock_client_init): assert log_call_args[1] == LOG_MESSAGE_SETUP_FAILURE assert log_call_kwargs["item_id"] == f"{step_name}_1" - log_call_kwargs = log_call_args_list[1][1] + log_call_kwargs = log_call_args_list[3][1] assert log_call_kwargs["message"].endswith( "examples/fixtures/test_fixture_setup_failure/conftest.py:30: Exception" From 76de9b6fc8064d3cf00aca17ec6e2008ac072d02 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 14 Feb 2025 13:27:37 +0300 Subject: [PATCH 048/110] Fix fixture failure logging --- tests/integration/test_fixtures.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_fixtures.py 
b/tests/integration/test_fixtures.py index 85184bd..0a7631f 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -14,6 +14,7 @@ import sys from collections import defaultdict +from typing import Optional from unittest import mock import pytest @@ -59,7 +60,7 @@ def get_last_item_id() -> str: return ITEM_ID_LIST[-1] -def remove_last_item_id(*_, **__) -> str: +def remove_last_item_id(*_, **__) -> Optional[str]: if len(ITEM_ID_LIST) > 0: return ITEM_ID_LIST.pop() From 26580fc4df87b07169cbfefa04deb30cf04d0055 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 14 Feb 2025 15:56:31 +0300 Subject: [PATCH 049/110] Update test --- tests/integration/test_fixtures.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index 0a7631f..0ae828b 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -192,6 +192,9 @@ def test_fixture_teardown(mock_client_init): ) +FIXTURE_FAILED_MESSAGE = "function fixture setup failed: test_fixture_setup_failure_config" + + @pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions") @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_setup_failure(mock_client_init): @@ -223,6 +226,14 @@ def test_fixture_setup_failure(mock_client_init): assert log_call_args[1] == LOG_MESSAGE_SETUP_FAILURE assert log_call_kwargs["item_id"] == f"{step_name}_1" + log_call_kwargs = log_call_args_list[1][1] + assert log_call_kwargs["message"] == FIXTURE_FAILED_MESSAGE + assert log_call_kwargs["item_id"] == f"{step_name}_1" + + log_call_kwargs = log_call_args_list[2][1] + assert log_call_kwargs["message"].startswith("Traceback (most recent call last):") + assert log_call_kwargs["item_id"] == f"{step_name}_1" + log_call_kwargs = log_call_args_list[3][1] assert log_call_kwargs["message"].endswith( From 32f4b9e14adf6d4c2fcc331aedeee5206b611415 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 14 Feb 2025 18:46:33 +0300 Subject: [PATCH 050/110] Pytest BDD implementation: WIP --- pytest_reportportal/service.py | 62 ++++++++++++++++++++++++++-------- 1 file changed, 48 insertions(+), 14 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 9f75965..4447c6a 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -118,6 +118,7 @@ class LeafType(Enum): FILE = auto() CODE = auto() ROOT = auto() + NESTED = auto() @unique @@ -147,7 +148,7 @@ class PyTestService: _config: AgentConfig _issue_types: Dict[str, str] - _tree_path: Dict[Item, List[Dict[str, Any]]] + _tree_path: Dict[Any, List[Dict[str, Any]]] _bdd_item_by_name: Dict[str, Item] _bdd_item_by_scenario: Dict[Scenario, Item] _start_tracker: Set[str] @@ -165,6 +166,7 @@ def __init__(self, agent_config: AgentConfig) -> None: self._issue_types = {} self._tree_path = {} self._bdd_item_by_name = {} + self._bdd_item_by_scenario = {} self._start_tracker = set() self._launch_id = None self.agent_name = "pytest-reportportal" @@ -256,8 +258,8 @@ def _get_tree_path(self, item: Item) -> List[Item]: path.reverse() return path - def _get_leaf( - self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Optional[Item], item_id: Optional[str] = None + def _create_leaf( + self, leaf_type, parent_item: Optional[Dict[str, Any]], item: Optional[Any], item_id: Optional[str] = None ) -> Dict[str, Any]: """Construct a leaf for the itest tree. 
@@ -282,7 +284,7 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: :param session: pytest.Session object of the current execution :return: a tree of all tests and their suites """ - test_tree = self._get_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) + test_tree = self._create_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) for item in session.items: dir_path = self._get_item_dirs(item) @@ -299,7 +301,7 @@ def _build_test_tree(self, session: Session) -> Dict[str, Any]: leaf_type = LeafType.CODE if leaf not in children_leafs: - children_leafs[leaf] = self._get_leaf(leaf_type, current_leaf, leaf) + children_leafs[leaf] = self._create_leaf(leaf_type, current_leaf, leaf) current_leaf = children_leafs[leaf] return test_tree @@ -345,6 +347,8 @@ def _generate_names(self, test_tree: Dict[str, Any]) -> None: item = test_tree["item"] if isinstance(item, Module): test_tree["name"] = os.path.split(str(item.fspath))[1] + if isinstance(item, Feature) or isinstance(item, Scenario): + test_tree["name"] = f"{item.keyword}: {item.name}" else: test_tree["name"] = item.name @@ -414,7 +418,7 @@ def _truncate_item_name(self, name: str) -> str: ) return name - def _get_item_description(self, test_item: Item) -> Optional[str]: + def _get_item_description(self, test_item: Any) -> Optional[str]: """Get description of item. :param test_item: pytest.Item @@ -427,6 +431,8 @@ def _get_item_description(self, test_item: Item) -> Optional[str]: return trim_docstring(doc) if isinstance(test_item, DoctestItem): return test_item.reportinfo()[2] + if isinstance(test_item, Feature): + return test_item.description def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> Any: """ @@ -466,7 +472,7 @@ def _create_suite(self, leaf: Dict[str, Any]) -> None: leaf["exec"] = ExecStatus.IN_PROGRESS @check_rp_enabled - def _create_suite_path(self, item: Item) -> None: + def _create_suite_path(self, item: Any) -> None: path = self._tree_path[item] for leaf in path[1:-1]: if leaf["exec"] != ExecStatus.CREATED: @@ -976,11 +982,18 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """ if not PYTEST_BDD: return - item_name = self._get_python_name(scenario.name) - test_item = self._bdd_item_by_name.get(item_name, None) - if test_item is None: - return - self._bdd_item_by_scenario[scenario] = test_item + root_leaf = next(self._tree_path.values().__iter__())[0] + children_leafs = root_leaf["children"] + feature_leaf = self._create_leaf(LeafType.FILE, root_leaf, feature) + children_leafs[feature] = feature_leaf + children_leafs = feature_leaf["children"] + scenario_leaf = self._create_leaf(LeafType.CODE, feature_leaf, scenario) + children_leafs[scenario] = scenario_leaf + self._remove_file_names(root_leaf) + self._generate_names(root_leaf) + if not self._config.rp_hierarchy_code: + self._merge_code(root_leaf) + self._build_item_paths(root_leaf, []) def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Finish BDD scenario. Skip if it was not started. 
@@ -991,12 +1004,24 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: if not PYTEST_BDD: return - test_item = self._bdd_item_by_scenario.pop(scenario) - leaf = self._tree_path[test_item][-1] + leaf = self._tree_path[scenario][-1] self._finish_step(self._build_finish_step_rq(leaf)) leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) + def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: + """ + Process all types of scenario metadata for its start event. + + :param leaf: item context + """ + scenario = leaf["item"] + leaf["description"] = scenario.description + # TODO: Add support for pytest-bdd parameters + # leaf["code_ref"] = scenario.location + # leaf["test_case_id"] = self._get_test_case_id(None, leaf) + # leaf["attributes"] = self._process_attributes(scenario) + @check_rp_enabled def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: """Start BDD step. @@ -1008,6 +1033,15 @@ def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> No if not PYTEST_BDD: return + self._create_suite_path(scenario) + scenario_leaf = self._tree_path[scenario][-1] + if scenario_leaf["exec"] != ExecStatus.IN_PROGRESS: + self._process_scenario_metadata(scenario_leaf) + scenario_id = self._start_step(self._build_start_step_rq(scenario_leaf)) + reporter = self.rp.step_reporter + item_id = reporter.start_nested_step(step.name, timestamp()) + # TODO: Finish nested step logic + @check_rp_enabled def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: """Finish BDD step. From 0ed5435be833a5ddc40c8201968c92394fe0c599 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Sat, 15 Feb 2025 12:16:37 +0300 Subject: [PATCH 051/110] Update pre-commit version --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 60670a9..666426f 100644 --- a/tox.ini +++ b/tox.ini @@ -21,7 +21,7 @@ commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xm [testenv:pep] skip_install = True -deps = pre-commit>=1.11.0 +deps = pre-commit>=1.19.0 commands = pre-commit run --all-files --show-diff-on-failure [gh-actions] From 404755eb31e33c4a3078112edd3f3d87fc2c51b4 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Mon, 17 Feb 2025 17:55:40 +0300 Subject: [PATCH 052/110] Pytest BDD implementation: WIP --- pytest_reportportal/service.py | 112 ++++++++++++++++++++++----------- tests/integration/test_bdd.py | 27 +++++--- 2 files changed, 94 insertions(+), 45 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 4447c6a..1c22a36 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -27,7 +27,7 @@ from _pytest.doctest import DoctestItem from aenum import Enum, auto, unique from py.path import local -from pytest import Class, Function, Item, Module, Package, PytestWarning, Session +from pytest import Class, Function, Item, Mark, Module, Package, PytestWarning, Session from reportportal_client.aio import Task from reportportal_client.core.rp_issues import ExternalIssue, Issue from reportportal_client.helpers import timestamp @@ -118,6 +118,7 @@ class LeafType(Enum): FILE = auto() CODE = auto() ROOT = auto() + SUITE = auto() NESTED = auto() @@ -149,8 +150,7 @@ class PyTestService: _config: AgentConfig _issue_types: Dict[str, str] _tree_path: Dict[Any, List[Dict[str, Any]]] - _bdd_item_by_name: Dict[str, Item] - _bdd_item_by_scenario: Dict[Scenario, Item] + _bdd_root_leaf: 
Optional[Dict[str, Any]] _start_tracker: Set[str] _launch_id: Optional[str] agent_name: str @@ -165,8 +165,7 @@ def __init__(self, agent_config: AgentConfig) -> None: self._config = agent_config self._issue_types = {} self._tree_path = {} - self._bdd_item_by_name = {} - self._bdd_item_by_scenario = {} + self._bdd_root_leaf = None self._start_tracker = set() self._launch_id = None self.agent_name = "pytest-reportportal" @@ -347,7 +346,10 @@ def _generate_names(self, test_tree: Dict[str, Any]) -> None: item = test_tree["item"] if isinstance(item, Module): test_tree["name"] = os.path.split(str(item.fspath))[1] - if isinstance(item, Feature) or isinstance(item, Scenario): + elif isinstance(item, Feature): + name = item.name if item.name else item.rel_filename + test_tree["name"] = f"{item.keyword}: {name}" + elif isinstance(item, Scenario): test_tree["name"] = f"{item.keyword}: {item.name}" else: test_tree["name"] = item.name @@ -374,8 +376,11 @@ def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separato def _merge_dirs(self, test_tree: Dict[str, Any]) -> None: self._merge_leaf_types(test_tree, {LeafType.DIR}, self._config.rp_dir_path_separator) + def _merge_code_with_separator(self, test_tree: Dict[str, Any], separator: str) -> None: + self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, separator) + def _merge_code(self, test_tree: Dict[str, Any]) -> None: - self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, "::") + self._merge_code_with_separator(test_tree, "::") def _build_item_paths(self, leaf: Dict[str, Any], path: List[Dict[str, Any]]) -> None: if "children" in leaf and len(leaf["children"]) > 0: @@ -448,7 +453,15 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> return func(leaf) def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: - code_ref = str(leaf["item"]) if leaf["type"] == LeafType.DIR else str(leaf["item"].fspath) + if leaf["type"] == LeafType.DIR: + code_ref = str(leaf["item"]) + elif leaf["type"] == LeafType.FILE: + if isinstance(leaf["item"], Feature): + code_ref = str(leaf["item"].rel_filename) + else: + code_ref = str(leaf["item"].fspath) + else: + code_ref = str(leaf["item"].fspath) parent_item_id = self._lock(leaf["parent"], lambda p: p.get("item_id")) if "parent" in leaf else None payload = { "name": self._truncate_item_name(leaf["name"]), @@ -566,7 +579,7 @@ def _get_issue_description_line(self, mark, default_url): issues += template.format(issue_id=issue_id, url=issue_url) return ISSUE_DESCRIPTION_LINE_TEMPLATE.format(reason, issues) - def _get_issue(self, mark) -> Optional[Issue]: + def _get_issue(self, mark: Mark) -> Optional[Issue]: """Add issues description and issue_type to the test item. 
:param mark: pytest mark @@ -748,7 +761,6 @@ def start_pytest_item(self, test_item: Optional[Item] = None): self.start() if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): - self._bdd_item_by_name[test_item.name] = test_item return self._create_suite_path(test_item) @@ -783,7 +795,7 @@ def process_results(self, test_item: Item, report): def _build_finish_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: issue = leaf.get("issue", None) - status = leaf["status"] + status = leaf.get("status", "PASSED") if status == "SKIPPED" and not self._config.rp_is_skipped_an_issue: issue = NOT_ISSUE if status == "PASSED": @@ -849,7 +861,6 @@ def finish_pytest_item(self, test_item: Optional[Item] = None) -> None: self._process_metadata_item_finish(leaf) if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): - del self._bdd_item_by_name[test_item.name] return self._finish_step(self._build_finish_step_rq(leaf)) @@ -963,17 +974,6 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None LOGGER.exception(e) reporter.finish_nested_step(item_id, timestamp(), "FAILED") - def _get_python_name(self, name: str) -> str: - python_name = f"test_{make_python_name(name)}" - same_scenario_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] - if len(same_scenario_names) < 1: - return python_name - elif len(same_scenario_names) == 1: - return same_scenario_names[0] - else: - indexes = sorted([int(name.split("_")[-1]) for name in same_scenario_names]) - return f"{python_name}_{indexes[-1]}" - def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Save BDD scenario and Feature to test tree. The scenario will be started later if a step will be reported. 
@@ -982,17 +982,31 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """ if not PYTEST_BDD: return - root_leaf = next(self._tree_path.values().__iter__())[0] + + root_leaf = self._bdd_root_leaf + if not root_leaf: + self._bdd_root_leaf = root_leaf = self._create_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) children_leafs = root_leaf["children"] - feature_leaf = self._create_leaf(LeafType.FILE, root_leaf, feature) - children_leafs[feature] = feature_leaf - children_leafs = feature_leaf["children"] - scenario_leaf = self._create_leaf(LeafType.CODE, feature_leaf, scenario) + if feature in children_leafs: + feature_leaf = children_leafs[feature] + else: + feature_leaf = self._create_leaf(LeafType.FILE, root_leaf, feature) + children_leafs[feature] = feature_leaf + if scenario.rule: + rule = scenario.rule + if rule in children_leafs: + rule_leaf = children_leafs[rule] + else: + rule_leaf = self._create_leaf(LeafType.SUITE, feature_leaf, rule) + else: + rule_leaf = feature_leaf + children_leafs = rule_leaf["children"] + scenario_leaf = self._create_leaf(LeafType.CODE, rule_leaf, scenario) children_leafs[scenario] = scenario_leaf self._remove_file_names(root_leaf) self._generate_names(root_leaf) if not self._config.rp_hierarchy_code: - self._merge_code(root_leaf) + self._merge_code_with_separator(root_leaf, " - ") self._build_item_paths(root_leaf, []) def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: @@ -1009,6 +1023,13 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) + def _get_bdd_code_ref(self, scenario: Scenario) -> str: + code_ref = scenario.feature.rel_filename + "/" + if scenario.rule: + code_ref += f"[RULE:{scenario.rule.name}]/" + code_ref += f"[SCENARIO:{scenario.name}]" + return code_ref + def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: """ Process all types of scenario metadata for its start event. 
@@ -1017,8 +1038,8 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: """ scenario = leaf["item"] leaf["description"] = scenario.description + leaf["code_ref"] = self._get_bdd_code_ref(scenario) # TODO: Add support for pytest-bdd parameters - # leaf["code_ref"] = scenario.location # leaf["test_case_id"] = self._get_test_case_id(None, leaf) # leaf["attributes"] = self._process_attributes(scenario) @@ -1037,10 +1058,14 @@ def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> No scenario_leaf = self._tree_path[scenario][-1] if scenario_leaf["exec"] != ExecStatus.IN_PROGRESS: self._process_scenario_metadata(scenario_leaf) - scenario_id = self._start_step(self._build_start_step_rq(scenario_leaf)) + scenario_leaf["item_id"] = self._start_step(self._build_start_step_rq(scenario_leaf)) + scenario_leaf["exec"] = ExecStatus.IN_PROGRESS reporter = self.rp.step_reporter item_id = reporter.start_nested_step(step.name, timestamp()) - # TODO: Finish nested step logic + step_leaf = self._create_leaf(LeafType.NESTED, scenario_leaf, step) + scenario_leaf["children"][step] = step_leaf + step_leaf["item_id"] = item_id + step_leaf["exec"] = ExecStatus.IN_PROGRESS @check_rp_enabled def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: @@ -1053,6 +1078,16 @@ def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> N if not PYTEST_BDD: return + status = "PASSED" + if step.failed: + status = "FAILED" + reporter = self.rp.step_reporter + scenario_leaf = self._tree_path[scenario][-1] + step_leaf = scenario_leaf["children"][step] + item_id = step_leaf["item_id"] + reporter.finish_nested_step(item_id, timestamp(), status) + step_leaf["exec"] = ExecStatus.FINISHED + @check_rp_enabled def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception) -> None: """Report BDD step error. 
@@ -1064,10 +1099,15 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step
         """
         if not PYTEST_BDD:
             return
-        test_item = self._bdd_item_by_scenario[scenario]
-        self.post_log(test_item, str(exception), log_level="ERROR")
-        leaf = self._tree_path[test_item][-1]
-        leaf["status"] = "FAILED"
+
+        scenario_leaf = self._tree_path[scenario][-1]
+        step_leaf = scenario_leaf["children"][step]
+        item_id = step_leaf["item_id"]
+        traceback_str = "\n".join(traceback.format_exception(exception))
+        exception_log = self._build_log(item_id, traceback_str, log_level="ERROR")
+        client = self.rp.step_reporter.client
+        client.log(**exception_log)
+        self.post_log()
 
     def start(self) -> None:
         """Start servicing Report Portal requests."""
diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py
index 15e88da..1e2e4dc 100644
--- a/tests/integration/test_bdd.py
+++ b/tests/integration/test_bdd.py
@@ -14,27 +14,36 @@
 
 from unittest import mock
 
+from reportportal_client.steps import StepReporter
+
 from tests import REPORT_PORTAL_SERVICE
 from tests.helpers import utils
 
 
+def setup_mock(mock_client_init):
+    mock_client = mock_client_init.return_value
+    mock_client.step_reporter = StepReporter(mock_client)
+    return mock_client
+
+
 @mock.patch(REPORT_PORTAL_SERVICE)
 def test_bdd(mock_client_init):
+    mock_client = setup_mock(mock_client_init)
     variables = {}
     variables.update(utils.DEFAULT_VARIABLES.items())
     result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables)
 
     assert int(result) == 0, "Exit code should be 0 (no errors)"
 
-    mock_client = mock_client_init.return_value
-    assert mock_client.start_test_item.call_count == 6, 'There should be exactly six "start_test_item" calls'
+    assert mock_client.start_test_item.call_count == 5, 'There should be exactly five "start_test_item" calls'
     assert (
         mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count
     ), '"start_test_item" and "finish_test_item" should be called the same number of times'
 
-    # Check that scenarios and steps are reported correctly
-    scenario_calls = [
-        call for call in mock_client.start_test_item.call_args_list if call[1]["item_type"] == "SCENARIO"
-    ]
-    step_calls = [call for call in mock_client.start_test_item.call_args_list if call[1]["item_type"] == "STEP"]
-    assert len(scenario_calls) == 1, "There should be exactly one Scenario reported"
-    assert len(step_calls) == 4, "There should be exactly four Steps reported"
+    scenario_call = mock_client.start_test_item.call_args_list[0]
+    assert scenario_call[1]["item_type"] == "STEP", "First call should be a Scenario"
+    assert scenario_call[1].get("has_stats", True) is True, "First call should have stats"
+
+    step_calls = mock_client.start_test_item.call_args_list[1:]
+    for call in step_calls:
+        assert call[0][2] == "step", "All other calls should be Steps"
+        assert call[1]["has_stats"] is False, "All other calls should not have stats"
From aa0c68377c2504cc5b519c7b59532969a892da7f Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Tue, 18 Feb 2025 12:36:05 +0300
Subject: [PATCH 053/110] Add code_ref check

---
 tests/integration/test_bdd.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py
index 1e2e4dc..474bf46 100644
--- a/tests/integration/test_bdd.py
+++ b/tests/integration/test_bdd.py
@@ -47,3 +47,8 @@ def test_bdd(mock_client_init):
     for call in step_calls:
         assert call[0][2] == "step", "All other calls should be Steps"
         assert
call[1]["has_stats"] is False, "All other calls should not have stats" + + assert ( + scenario_call[1]["code_ref"] + == "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" + ) From 8d46179c44178afaed3175f0c68edc3281bd2f57 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 12:42:22 +0300 Subject: [PATCH 054/110] Add more checks and fixes --- pytest_reportportal/service.py | 9 ++++++--- tests/integration/test_bdd.py | 10 ++++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 1c22a36..697afff 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1023,13 +1023,16 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) - def _get_bdd_code_ref(self, scenario: Scenario) -> str: + def _get_scenario_code_ref(self, scenario: Scenario) -> str: code_ref = scenario.feature.rel_filename + "/" if scenario.rule: code_ref += f"[RULE:{scenario.rule.name}]/" code_ref += f"[SCENARIO:{scenario.name}]" return code_ref + def _get_scenario_test_case_id(self, leaf: Dict[str, Any]) -> str: + return leaf["code_ref"] + def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: """ Process all types of scenario metadata for its start event. @@ -1038,9 +1041,9 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: """ scenario = leaf["item"] leaf["description"] = scenario.description - leaf["code_ref"] = self._get_bdd_code_ref(scenario) + leaf["code_ref"] = self._get_scenario_code_ref(scenario) + leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) # TODO: Add support for pytest-bdd parameters - # leaf["test_case_id"] = self._get_test_case_id(None, leaf) # leaf["attributes"] = self._process_attributes(scenario) @check_rp_enabled diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 474bf46..537bcd7 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -48,7 +48,9 @@ def test_bdd(mock_client_init): assert call[0][2] == "step", "All other calls should be Steps" assert call[1]["has_stats"] is False, "All other calls should not have stats" - assert ( - scenario_call[1]["code_ref"] - == "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" - ) + code_ref = "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" + assert scenario_call[1]["code_ref"] == code_ref + assert scenario_call[1]["test_case_id"] == code_ref + assert scenario_call[1]["parent_item_id"] is None + assert scenario_call[1]["parameters"] is None + assert scenario_call[1]["description"] is None From 97a8b52488639f6fed37e92299b0cd5388a567da Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 13:47:52 +0300 Subject: [PATCH 055/110] Fix traceback logging for old versions of Python --- pytest_reportportal/service.py | 15 +++++++++++---- tests/integration/test_fixtures.py | 6 ------ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 697afff..6e20d9a 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -27,7 +27,7 @@ from _pytest.doctest import DoctestItem from aenum import Enum, auto, unique from py.path import local -from pytest import Class, Function, Item, Mark, Module, Package, PytestWarning, Session +from pytest 
import Class, Function, Item, Module, Package, PytestWarning, Session from reportportal_client.aio import Task from reportportal_client.core.rp_issues import ExternalIssue, Issue from reportportal_client.helpers import timestamp @@ -45,7 +45,11 @@ except ImportError: # in pytest < 8.0 there is no such type Dir = type("dummy", (), {}) - +try: + from pytest import Mark +except ImportError: + # in old pytest marks are located in the _pytest.mark module + from _pytest.mark import Mark try: # noinspection PyPackageRequirements # noinspection PyPackageRequirements @@ -958,14 +962,17 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None try: outcome = yield - exception = outcome.exception + exc_info = outcome.excinfo + exception = exc_info[1] status = "PASSED" if exception: if type(exception).__name__ != "Skipped": status = "FAILED" error_log = self._build_log(item_id, error_msg, log_level="ERROR") self.rp.log(**error_log) - traceback_str = "\n".join(traceback.format_exception(exception)) + traceback_str = "\n".join( + traceback.format_exception(outcome.excinfo[0], value=exception, tb=exc_info[2]) + ) exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") self.rp.log(**exception_log) reporter.finish_nested_step(item_id, timestamp(), status) diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index 0ae828b..df01654 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys from collections import defaultdict from typing import Optional from unittest import mock @@ -195,7 +194,6 @@ def test_fixture_teardown(mock_client_init): FIXTURE_FAILED_MESSAGE = "function fixture setup failed: test_fixture_setup_failure_config" -@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions") @mock.patch(REPORT_PORTAL_SERVICE) def test_fixture_setup_failure(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) @@ -435,7 +433,6 @@ def test_failure_fixture_teardown(mock_client_init): ) -@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions") @mock.patch(REPORT_PORTAL_SERVICE) def test_session_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) @@ -463,7 +460,6 @@ def test_session_fixture_setup(mock_client_init): assert not setup_call_kwargs["has_stats"] -@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions") @mock.patch(REPORT_PORTAL_SERVICE) def test_package_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) @@ -491,7 +487,6 @@ def test_package_fixture_setup(mock_client_init): assert not setup_call_kwargs["has_stats"] -@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions") @mock.patch(REPORT_PORTAL_SERVICE) def test_module_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) @@ -519,7 +514,6 @@ def test_module_fixture_setup(mock_client_init): assert not setup_call_kwargs["has_stats"] -@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python 3.8+ required due to bugs in older versions") @mock.patch(REPORT_PORTAL_SERVICE) def test_class_fixture_setup(mock_client_init): mock_client = setup_mock(mock_client_init) From 46473fc719db650d7465e90d9c1d42ed65733fa9 Mon Sep 17 00:00:00 
2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 13:53:44 +0300 Subject: [PATCH 056/110] Bump pytest-bdd version --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 0209e42..ceba64f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,4 +1,4 @@ delayed-assert pytest-cov pytest-parallel -pytest-bdd>=7.0.1 +pytest-bdd>=7.1.1 From bdc64233da01e71ced4506f270c4bc461e2c7c57 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 14:06:46 +0300 Subject: [PATCH 057/110] Bump pytest-bdd version --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index ceba64f..f4b63b7 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,4 +1,4 @@ delayed-assert pytest-cov pytest-parallel -pytest-bdd>=7.1.1 +pytest-bdd>=7.2.0 From eaf449e46c0db435261b91093ff77e1c3f399349 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 14:06:57 +0300 Subject: [PATCH 058/110] Backward compatibility fixes --- pytest_reportportal/service.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 6e20d9a..a00e677 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -352,9 +352,11 @@ def _generate_names(self, test_tree: Dict[str, Any]) -> None: test_tree["name"] = os.path.split(str(item.fspath))[1] elif isinstance(item, Feature): name = item.name if item.name else item.rel_filename - test_tree["name"] = f"{item.keyword}: {name}" + keyword = getattr(item, "keyword", "Feature") + test_tree["name"] = f"{keyword}: {name}" elif isinstance(item, Scenario): - test_tree["name"] = f"{item.keyword}: {item.name}" + keyword = getattr(item, "keyword", "Scenario") + test_tree["name"] = f"{keyword}: {item.name}" else: test_tree["name"] = item.name @@ -999,8 +1001,8 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: else: feature_leaf = self._create_leaf(LeafType.FILE, root_leaf, feature) children_leafs[feature] = feature_leaf - if scenario.rule: - rule = scenario.rule + rule = getattr(scenario, "rule", None) + if rule: if rule in children_leafs: rule_leaf = children_leafs[rule] else: @@ -1032,8 +1034,9 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: def _get_scenario_code_ref(self, scenario: Scenario) -> str: code_ref = scenario.feature.rel_filename + "/" - if scenario.rule: - code_ref += f"[RULE:{scenario.rule.name}]/" + rule = getattr(scenario, "rule", None) + if rule: + code_ref += f"[RULE:{rule.name}]/" code_ref += f"[SCENARIO:{scenario.name}]" return code_ref @@ -1113,7 +1116,7 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step scenario_leaf = self._tree_path[scenario][-1] step_leaf = scenario_leaf["children"][step] item_id = step_leaf["item_id"] - traceback_str = "\n".join(traceback.format_exception(exception)) + traceback_str = "\n".join(traceback.format_exception(type(exception), exception, exception.__traceback__)) exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") client = self.rp.step_reporter.client client.log(**exception_log) From 48ef88cf50fc4441a4eed5a1ceb65008ee4f5e84 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 14:08:14 +0300 Subject: [PATCH 059/110] Backward compatibility fixes --- pytest_reportportal/service.py | 4 +++- 
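The back-and-forth around `traceback.format_exception` in these compatibility patches stems from a signature change in Python 3.10: newer interpreters accept the exception object alone, while older ones require the `(etype, value, tb)` triple. Passing the triple explicitly works on both, which is the form the patches converge on; a standalone sketch (the helper name `format_exception_lines` is invented):

```python
import traceback
from typing import List


def format_exception_lines(exc: BaseException) -> List[str]:
    # The explicit triple is accepted on every supported interpreter;
    # on Python 3.10+ traceback.format_exception(exc) alone would also work.
    return traceback.format_exception(type(exc), exc, exc.__traceback__)


try:
    raise ValueError("boom")
except ValueError as error:
    print("".join(format_exception_lines(error)))
```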
1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index a00e677..9eb7cf5 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1116,7 +1116,9 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step scenario_leaf = self._tree_path[scenario][-1] step_leaf = scenario_leaf["children"][step] item_id = step_leaf["item_id"] - traceback_str = "\n".join(traceback.format_exception(type(exception), exception, exception.__traceback__)) + traceback_str = "\n".join( + traceback.format_exception(type(exception), value=exception, tb=exception.__traceback__) + ) exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") client = self.rp.step_reporter.client client.log(**exception_log) From 6ec27d3c3a9c5e74acdc10d40643304bde3eaabe Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 15:46:00 +0300 Subject: [PATCH 060/110] Add tests --- .../arguments_four_steps_description.feature | 15 +++ pytest_reportportal/service.py | 10 +- tests/integration/test_bdd.py | 94 ++++++++++++++++++- 3 files changed, 113 insertions(+), 6 deletions(-) create mode 100644 examples/bdd/features/arguments_four_steps_description.feature diff --git a/examples/bdd/features/arguments_four_steps_description.feature b/examples/bdd/features/arguments_four_steps_description.feature new file mode 100644 index 0000000..de6956d --- /dev/null +++ b/examples/bdd/features/arguments_four_steps_description.feature @@ -0,0 +1,15 @@ +Feature: Four step arguments + Description for the feature + + Scenario: Arguments for given, when, and, then + Description for the scenario + + Given there are 5 cucumbers + """ + Docstring for the step + """ + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 9eb7cf5..7fedda2 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -53,13 +53,14 @@ try: # noinspection PyPackageRequirements # noinspection PyPackageRequirements - from pytest_bdd.parser import Feature, Scenario, Step + from pytest_bdd.parser import Feature, Scenario, ScenarioTemplate, Step from pytest_bdd.scenario import make_python_name PYTEST_BDD = True except ImportError: Feature = type("dummy", (), {}) Scenario = type("dummy", (), {}) + ScenarioTemplate = type("dummy", (), {}) Step = type("dummy", (), {}) make_python_name: Callable[[str], str] = lambda x: x PYTEST_BDD = False @@ -442,8 +443,10 @@ def _get_item_description(self, test_item: Any) -> Optional[str]: return trim_docstring(doc) if isinstance(test_item, DoctestItem): return test_item.reportinfo()[2] - if isinstance(test_item, Feature): - return test_item.description + if isinstance(test_item, (Feature, Scenario, ScenarioTemplate)): + description = test_item.description + if description: + return description def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> Any: """ @@ -1050,7 +1053,6 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: :param leaf: item context """ scenario = leaf["item"] - leaf["description"] = scenario.description leaf["code_ref"] = self._get_scenario_code_ref(scenario) leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) # TODO: Add support for pytest-bdd parameters diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 537bcd7..3d99101 100644 --- a/tests/integration/test_bdd.py +++ 
b/tests/integration/test_bdd.py @@ -12,22 +12,58 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections import defaultdict +from typing import Optional from unittest import mock +from reportportal_client import set_current from reportportal_client.steps import StepReporter from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils +ITEM_ID_DICT = defaultdict(lambda: 0) +ITEM_ID_LIST = [] + + +def generate_item_id(*args, **kwargs) -> str: + if args: + name = args[0] + else: + name = kwargs["name"] + ITEM_ID_DICT[name] += 1 + item_id = f"{name}_{ITEM_ID_DICT[name]}" + ITEM_ID_LIST.append(item_id) + return item_id + + +def get_last_item_id() -> Optional[str]: + if len(ITEM_ID_LIST) > 0: + return ITEM_ID_LIST[-1] + + +def remove_last_item_id(*_, **__) -> Optional[str]: + if len(ITEM_ID_LIST) > 0: + return ITEM_ID_LIST.pop() + def setup_mock(mock_client_init): mock_client = mock_client_init.return_value mock_client.step_reporter = StepReporter(mock_client) + set_current(mock_client) + return mock_client + + +def setup_mock_for_logging(mock_client_init): + mock_client = setup_mock(mock_client_init) + mock_client.start_test_item.side_effect = generate_item_id + mock_client.finish_test_item.side_effect = remove_last_item_id + mock_client.current_item.side_effect = get_last_item_id return mock_client @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd(mock_client_init): +def test_basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -40,17 +76,71 @@ def test_bdd(mock_client_init): ), '"start_test_item" and "finish_test_item" should be called the same number of times' scenario_call = mock_client.start_test_item.call_args_list[0] + code_ref = "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" assert scenario_call[1]["item_type"] == "STEP", "First call should be a Scenario" assert scenario_call[1].get("has_stats", True) is True, "First call should have stats" + assert scenario_call[1]["code_ref"] == code_ref + assert scenario_call[1]["test_case_id"] == code_ref + assert scenario_call[1]["name"] == "Feature: Four step arguments - Scenario: Arguments for given, when, and, then" + assert scenario_call[1]["parent_item_id"] is None + assert scenario_call[1]["parameters"] is None + assert scenario_call[1]["description"] is None step_calls = mock_client.start_test_item.call_args_list[1:] for call in step_calls: assert call[0][2] == "step", "All other calls should be Steps" assert call[1]["has_stats"] is False, "All other calls should not have stats" + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_basic_bdd_with_feature_suite(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + assert mock_client.start_test_item.call_count == 6, 'There should be exactly six "start_test_item" calls' + assert ( + mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count + ), '"start_test_item" and "finish_test_item" should be called the same number of times' + + suite_call = mock_client.start_test_item.call_args_list[0] + assert suite_call[1]["item_type"] == "SUITE", "First call should be a 
Feature" + assert suite_call[1].get("has_stats", True) is True, "First call should have stats" + assert suite_call[1]["parent_item_id"] is None + assert suite_call[1]["name"] == "Feature: Four step arguments" + + scenario_call = mock_client.start_test_item.call_args_list[1] code_ref = "features/arguments_four_steps.feature/[SCENARIO:Arguments for given, when, and, then]" + assert scenario_call[1]["item_type"] == "STEP", "First call should be a Scenario" + assert scenario_call[1].get("has_stats", True) is True, "First call should have stats" assert scenario_call[1]["code_ref"] == code_ref assert scenario_call[1]["test_case_id"] == code_ref - assert scenario_call[1]["parent_item_id"] is None + assert scenario_call[1]["name"] == "Scenario: Arguments for given, when, and, then" + assert scenario_call[1]["parent_item_id"] == "Feature: Four step arguments_1" assert scenario_call[1]["parameters"] is None assert scenario_call[1]["description"] is None + + step_calls = mock_client.start_test_item.call_args_list[2:] + for call in step_calls: + assert call[0][2] == "step", "All other calls should be Steps" + assert call[1]["has_stats"] is False, "All other calls should not have stats" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_scenario_descriptions(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_arguments_description.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + code_ref = "features/arguments_four_steps_description.feature/[SCENARIO:Arguments for given, when, and, then]" + scenario_call = mock_client.start_test_item.call_args_list[0] + assert scenario_call[1]["code_ref"] == code_ref + assert scenario_call[1]["test_case_id"] == code_ref + assert scenario_call[1]["description"] == "Description for the scenario" From be1e9786e1b8a47d0b30f22c2e280afec24f3e9b Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 15:46:15 +0300 Subject: [PATCH 061/110] Add tests --- .../step_defs/test_arguments_description.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 examples/bdd/step_defs/test_arguments_description.py diff --git a/examples/bdd/step_defs/test_arguments_description.py b/examples/bdd/step_defs/test_arguments_description.py new file mode 100644 index 0000000..d1301b7 --- /dev/null +++ b/examples/bdd/step_defs/test_arguments_description.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/arguments_four_steps_description.feature") + + +@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "eat": 0} + + +@when(parsers.parse("I eat {eat:d} cucumbers")) +def eat_cucumbers(cucumbers, eat): + cucumbers["eat"] += eat + + +@then(parsers.parse("I should have {left:d} cucumbers")) +def should_have_left_cucumbers(cucumbers, left): + assert cucumbers["start"] - cucumbers["eat"] == left From ea9d0565e8ceca993f2ba39edf383c8748acfbdb Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 15:54:51 +0300 Subject: [PATCH 062/110] Fix `report_bdd_step_error` function --- pytest_reportportal/service.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 7fedda2..bb78bf3 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1124,7 +1124,6 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step exception_log = self._build_log(item_id, traceback_str, log_level="ERROR") client = self.rp.step_reporter.client client.log(**exception_log) - self.post_log() def start(self) -> None: """Start servicing Report Portal requests.""" From 93c67625dacfca1e26db2c9b8fe19a5fc8d38ebb Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 16:01:28 +0300 Subject: [PATCH 063/110] Make docstring test separate one --- .../arguments_four_steps_description.feature | 4 ---- .../arguments_four_steps_docstring.feature | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 examples/bdd/features/arguments_four_steps_docstring.feature diff --git a/examples/bdd/features/arguments_four_steps_description.feature b/examples/bdd/features/arguments_four_steps_description.feature index de6956d..9d4285c 100644 --- a/examples/bdd/features/arguments_four_steps_description.feature +++ b/examples/bdd/features/arguments_four_steps_description.feature @@ -5,10 +5,6 @@ Feature: Four step arguments Description for the scenario Given there are 5 cucumbers - """ - Docstring for the step - """ - When I eat 3 cucumbers And I eat 2 cucumbers diff --git a/examples/bdd/features/arguments_four_steps_docstring.feature b/examples/bdd/features/arguments_four_steps_docstring.feature new file mode 100644 index 0000000..de6956d --- /dev/null +++ b/examples/bdd/features/arguments_four_steps_docstring.feature @@ -0,0 +1,15 @@ +Feature: Four step arguments + Description for the feature + + Scenario: Arguments for given, when, and, then + Description for the scenario + + Given there are 5 cucumbers + """ + Docstring for the step + """ + + When I eat 3 cucumbers + And I eat 2 cucumbers + + Then I should have 0 cucumbers From 428ea22ae0283211310876f04a6c000c64cad01f Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 16:18:15 +0300 Subject: [PATCH 064/110] Backward compatibility fix --- tests/integration/test_bdd.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 3d99101..5ad479d 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import importlib.metadata from collections import defaultdict from typing import Optional from unittest import mock @@ -22,6 +23,8 @@ from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils +pytest_bdd_version = [int(p) for p in importlib.metadata.version("pytest-bdd").split(".")] + ITEM_ID_DICT = defaultdict(lambda: 0) ITEM_ID_LIST = [] @@ -143,4 +146,22 @@ def test_bdd_scenario_descriptions(mock_client_init): scenario_call = mock_client.start_test_item.call_args_list[0] assert scenario_call[1]["code_ref"] == code_ref assert scenario_call[1]["test_case_id"] == code_ref - assert scenario_call[1]["description"] == "Description for the scenario" + description = scenario_call[1]["description"] + if pytest_bdd_version[0] < 8: + # before pytest-bdd 8 description was a list + description = description[0] + assert description == "Description for the scenario" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_feature_descriptions(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_arguments_description.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + feature_call = mock_client.start_test_item.call_args_list[0] + assert feature_call[1]["description"] == "Description for the feature" From 0b3d9da4aad88bc93c81a4159e31b617ef2a6d60 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 17:12:53 +0300 Subject: [PATCH 065/110] Add another test and fixes --- examples/bdd/features/failed_scenario.feature | 4 ++ examples/bdd/step_defs/test_failed_step.py | 22 ++++++++ pytest_reportportal/plugin.py | 5 +- pytest_reportportal/service.py | 24 +++++--- tests/integration/test_bdd.py | 56 +++++++++++++++++-- 5 files changed, 94 insertions(+), 17 deletions(-) create mode 100644 examples/bdd/features/failed_scenario.feature create mode 100644 examples/bdd/step_defs/test_failed_step.py diff --git a/examples/bdd/features/failed_scenario.feature b/examples/bdd/features/failed_scenario.feature new file mode 100644 index 0000000..861fb44 --- /dev/null +++ b/examples/bdd/features/failed_scenario.feature @@ -0,0 +1,4 @@ +Feature: Test failed scenario + + Scenario: The scenario + Given I have a failed step diff --git a/examples/bdd/step_defs/test_failed_step.py b/examples/bdd/step_defs/test_failed_step.py new file mode 100644 index 0000000..d6967a9 --- /dev/null +++ b/examples/bdd/step_defs/test_failed_step.py @@ -0,0 +1,22 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
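The version gate above exists because pytest-bdd changed the type of a scenario's `description`: releases before major version 8 exposed it as a list of lines, newer ones return a single string. A tolerant consumer can fold both shapes into one; a sketch (the function is illustrative, not agent code):

```python
from typing import List, Optional, Union


def normalize_description(description: Union[str, List[str], None]) -> Optional[str]:
    # pytest-bdd < 8: list of lines; pytest-bdd >= 8: a single string.
    if isinstance(description, list):
        return "\n".join(description)
    return description


assert normalize_description(["Description for the scenario"]) == "Description for the scenario"
assert normalize_description("Description for the scenario") == "Description for the scenario"
```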
+ +from pytest_bdd import given, scenarios + +scenarios("../features/failed_scenario.feature") + + +@given("I have a failed step") +def given_failed_step(): + assert False diff --git a/pytest_reportportal/plugin.py b/pytest_reportportal/plugin.py index 1d616d4..84832c2 100644 --- a/pytest_reportportal/plugin.py +++ b/pytest_reportportal/plugin.py @@ -491,7 +491,7 @@ def pytest_bdd_step_error( yield service = config.py_test_service - service.report_bdd_step_error(feature, scenario, step, exception) + service.finish_bdd_step_error(feature, scenario, step, exception) @pytest.hookimpl(hookwrapper=True) def pytest_bdd_step_func_lookup_error( @@ -514,8 +514,7 @@ def pytest_bdd_step_func_lookup_error( service = config.py_test_service service.start_bdd_step(feature, scenario, step) yield - service.report_bdd_step_error(feature, scenario, step, exception) - service.finish_bdd_step(feature, scenario, step) + service.finish_bdd_step_error(feature, scenario, step, exception) # no types for backward compatibility for older pytest versions diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index bb78bf3..32bed87 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1076,12 +1076,21 @@ def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> No scenario_leaf["item_id"] = self._start_step(self._build_start_step_rq(scenario_leaf)) scenario_leaf["exec"] = ExecStatus.IN_PROGRESS reporter = self.rp.step_reporter - item_id = reporter.start_nested_step(step.name, timestamp()) + item_id = reporter.start_nested_step(f"{step.keyword} {step.name}", timestamp()) step_leaf = self._create_leaf(LeafType.NESTED, scenario_leaf, step) scenario_leaf["children"][step] = step_leaf step_leaf["item_id"] = item_id step_leaf["exec"] = ExecStatus.IN_PROGRESS + def _finish_bdd_step(self, leaf: Dict[str, Any], status: str) -> None: + if leaf["exec"] != ExecStatus.IN_PROGRESS: + return + + reporter = self.rp.step_reporter + item_id = leaf["item_id"] + reporter.finish_nested_step(item_id, timestamp(), status) + leaf["exec"] = ExecStatus.FINISHED + @check_rp_enabled def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: """Finish BDD step. @@ -1093,18 +1102,12 @@ def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> N if not PYTEST_BDD: return - status = "PASSED" - if step.failed: - status = "FAILED" - reporter = self.rp.step_reporter scenario_leaf = self._tree_path[scenario][-1] step_leaf = scenario_leaf["children"][step] - item_id = step_leaf["item_id"] - reporter.finish_nested_step(item_id, timestamp(), status) - step_leaf["exec"] = ExecStatus.FINISHED + self._finish_bdd_step(step_leaf, "PASSED") @check_rp_enabled - def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception) -> None: + def finish_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step, exception: Exception) -> None: """Report BDD step error. 
:param feature: pytest_bdd.Feature @@ -1116,6 +1119,7 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step return scenario_leaf = self._tree_path[scenario][-1] + scenario_leaf["status"] = "FAILED" step_leaf = scenario_leaf["children"][step] item_id = step_leaf["item_id"] traceback_str = "\n".join( @@ -1125,6 +1129,8 @@ def report_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step client = self.rp.step_reporter.client client.log(**exception_log) + self._finish_bdd_step(step_leaf, "FAILED") + def start(self) -> None: """Start servicing Report Portal requests.""" self.parent_item_id = self._config.rp_parent_item_id diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 5ad479d..4b2a0a4 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -65,15 +65,24 @@ def setup_mock_for_logging(mock_client_init): return mock_client +STEP_NAMES = [ + "Given there are 5 cucumbers", + "When I eat 3 cucumbers", + "And I eat 2 cucumbers", + "Then I should have 0 cucumbers", +] + + @mock.patch(REPORT_PORTAL_SERVICE) def test_basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) variables = {} variables.update(utils.DEFAULT_VARIABLES.items()) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables) assert int(result) == 0, "Exit code should be 0 (no errors)" - assert mock_client.start_test_item.call_count == 5, 'There should be exactly six "start_test_item" calls' + assert mock_client.start_test_item.call_count == 5, 'There should be exactly five "start_test_item" calls' assert ( mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count ), '"start_test_item" and "finish_test_item" should be called the same number of times' @@ -90,13 +99,26 @@ def test_basic_bdd(mock_client_init): assert scenario_call[1]["description"] is None step_calls = mock_client.start_test_item.call_args_list[1:] - for call in step_calls: + for i, call in enumerate(step_calls): + assert call[0][0] == STEP_NAMES[i] assert call[0][2] == "step", "All other calls should be Steps" assert call[1]["has_stats"] is False, "All other calls should not have stats" + finish_calls = mock_client.finish_test_item.call_args_list + finish_step_calls = finish_calls[:-1] + for i, call in enumerate(finish_step_calls): + assert call[0][0] == f"{STEP_NAMES[i]}_1" + assert call[1]["status"] == "PASSED" + finish_scenario_call = finish_calls[-1] + assert finish_scenario_call[1]["status"] == "PASSED" + assert ( + finish_scenario_call[1]["item_id"] + == "Feature: Four step arguments - Scenario: Arguments for given, when, and, then_1" + ) + @mock.patch(REPORT_PORTAL_SERVICE) -def test_basic_bdd_with_feature_suite(mock_client_init): +def basic_bdd_with_feature_suite(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} @@ -133,7 +155,7 @@ def test_basic_bdd_with_feature_suite(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_scenario_descriptions(mock_client_init): +def bdd_scenario_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -154,7 +176,7 @@ def test_bdd_scenario_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_feature_descriptions(mock_client_init): +def bdd_feature_descriptions(mock_client_init): 
mock_client = setup_mock(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -165,3 +187,27 @@ def test_bdd_feature_descriptions(mock_client_init): feature_call = mock_client.start_test_item.call_args_list[0] assert feature_call[1]["description"] == "Description for the feature" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_failed_feature(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + variables = {} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_failed_step.py"], variables=variables) + assert int(result) == 1, "Exit code should be 1 (test error)" + + assert mock_client.start_test_item.call_count == 2, 'There should be exactly two "start_test_item" calls' + assert ( + mock_client.start_test_item.call_count == mock_client.finish_test_item.call_count + ), '"start_test_item" and "finish_test_item" should be called the same number of times' + + finish_calls = mock_client.finish_test_item.call_args_list + finish_step_call = finish_calls[0] + finish_scenario_call = finish_calls[1] + + assert finish_step_call[0][0] == "Given I have a failed step_1" + assert finish_step_call[1]["status"] == "FAILED" + assert finish_scenario_call[1]["item_id"] == "Feature: Test failed scenario - Scenario: The scenario_1" + assert finish_scenario_call[1]["status"] == "FAILED" From 9f620a2e200aadf3ebc132bd98a1edbd664320f4 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 18 Feb 2025 17:39:41 +0300 Subject: [PATCH 066/110] Add another check and fixes --- pytest_reportportal/service.py | 3 +++ tests/integration/test_bdd.py | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 32bed87..2acfb09 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -786,6 +786,9 @@ def process_results(self, test_item: Item, report): :param test_item: pytest.Item :param report: pytest's result report """ + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + return + if report.longrepr: self.post_log(test_item, report.longreprtext, log_level="ERROR") diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 4b2a0a4..37430b9 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -211,3 +211,12 @@ def test_bdd_failed_feature(mock_client_init): assert finish_step_call[1]["status"] == "FAILED" assert finish_scenario_call[1]["item_id"] == "Feature: Test failed scenario - Scenario: The scenario_1" assert finish_scenario_call[1]["status"] == "FAILED" + + log_count = mock_client.log.call_count + # 1 - debug log from pytest-bdd's scenario module; 2 - traceback log from the agent + assert log_count == 1 + 1, 'Incorrect number of "log" calls' + + log_call_args_list = mock_client.log.call_args_list[1:] + assert log_call_args_list[0][1]["level"] == "ERROR" + assert log_call_args_list[0][1]["message"].endswith("AssertionError: assert False\n") + assert log_call_args_list[0][1]["item_id"] == "Given I have a failed step_1" From d6cedd46a29f789c3489a615d3dbf0a99230e611 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 19 Feb 2025 13:03:40 +0300 Subject: [PATCH 067/110] Log pytest's error to scenario --- pytest_reportportal/service.py | 32 ++++++++++++++++++++++++++++++-- tests/integration/test_bdd.py | 16 ++++++++++------ 2 files changed, 40 
insertions(+), 8 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 2acfb09..e015da4 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -156,6 +156,8 @@ class PyTestService: _issue_types: Dict[str, str] _tree_path: Dict[Any, List[Dict[str, Any]]] _bdd_root_leaf: Optional[Dict[str, Any]] + _bdd_item_by_name: Dict[str, Item] + _bdd_scenario_by_item: Dict[Item, Scenario] _start_tracker: Set[str] _launch_id: Optional[str] agent_name: str @@ -171,6 +173,8 @@ def __init__(self, agent_config: AgentConfig) -> None: self._issue_types = {} self._tree_path = {} self._bdd_root_leaf = None + self._bdd_item_by_name = {} + self._bdd_scenario_by_item = {} self._start_tracker = set() self._launch_id = None self.agent_name = "pytest-reportportal" @@ -770,6 +774,7 @@ def start_pytest_item(self, test_item: Optional[Item] = None): self.start() if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + self._bdd_item_by_name[test_item.name] = test_item return self._create_suite_path(test_item) @@ -786,12 +791,13 @@ def process_results(self, test_item: Item, report): :param test_item: pytest.Item :param report: pytest's result report """ - if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): - return if report.longrepr: self.post_log(test_item, report.longreprtext, log_level="ERROR") + if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + return + leaf = self._tree_path[test_item][-1] # Defining test result if report.when == "setup": @@ -873,6 +879,7 @@ def finish_pytest_item(self, test_item: Optional[Item] = None) -> None: self._process_metadata_item_finish(leaf) if PYTEST_BDD and test_item.location[0].endswith("/pytest_bdd/scenario.py"): + del self._bdd_item_by_name[test_item.name] return self._finish_step(self._build_finish_step_rq(leaf)) @@ -951,6 +958,13 @@ def post_log( "Incorrect loglevel = %s. Force set to INFO. " "Available levels: %s.", log_level, KNOWN_LOG_LEVELS ) item_id = self._tree_path[test_item][-1]["item_id"] + if PYTEST_BDD: + if not item_id: + # Check if we are actually a BDD scenario + scenario = self._bdd_scenario_by_item[test_item] + if scenario: + # Yes, we are a BDD scenario, report log to the scenario + item_id = self._tree_path[scenario][-1]["item_id"] sl_rq = self._build_log(item_id, message, log_level, attachment) self.rp.log(**sl_rq) @@ -989,6 +1003,17 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None LOGGER.exception(e) reporter.finish_nested_step(item_id, timestamp(), "FAILED") + def _get_python_name(self, name: str) -> str: + python_name = f"test_{make_python_name(name)}" + same_scenario_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] + if len(same_scenario_names) < 1: + return python_name + elif len(same_scenario_names) == 1: + return same_scenario_names[0] + else: + indexes = sorted([int(name.split("_")[-1]) for name in same_scenario_names]) + return f"{python_name}_{indexes[-1]}" + def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Save BDD scenario and Feature to test tree. The scenario will be started later if a step will be reported. 
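The two additions above cooperate: `_get_python_name` reverses pytest-bdd's `test_<python_name>` naming (tolerating the numeric suffixes appended for duplicate scenario names), and `post_log` then redirects a log from the placeholder pytest item to the scenario's own started item. A condensed sketch of that redirect with invented stand-in mappings; the `.get()` lookups keep items that never map to a scenario from raising:

```python
from typing import Dict, Optional

# Invented stand-ins for the agent's internal mappings.
scenario_by_item: Dict[str, str] = {"test_the_scenario": "Scenario: The scenario"}
item_id_by_scenario: Dict[str, str] = {"Scenario: The scenario": "scenario-item-id"}


def resolve_log_target(item_name: str, item_id: Optional[str]) -> Optional[str]:
    # Items with their own started RP item log there; BDD placeholder
    # items fall back to the scenario item that was actually started.
    if item_id:
        return item_id
    scenario = scenario_by_item.get(item_name)
    return item_id_by_scenario.get(scenario) if scenario else None


assert resolve_log_target("test_the_scenario", None) == "scenario-item-id"
assert resolve_log_target("test_plain", None) is None
```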
@@ -997,6 +1022,9 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """ if not PYTEST_BDD: return + item_name = self._get_python_name(scenario.name) + test_item = self._bdd_item_by_name.get(item_name, None) + self._bdd_scenario_by_item[test_item] = scenario root_leaf = self._bdd_root_leaf if not root_leaf: diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 37430b9..1279a0e 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -74,7 +74,7 @@ def setup_mock_for_logging(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_basic_bdd(mock_client_init): +def test_test_basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {} @@ -118,7 +118,7 @@ def test_basic_bdd(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def basic_bdd_with_feature_suite(mock_client_init): +def test_basic_bdd_with_feature_suite(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} @@ -155,7 +155,7 @@ def basic_bdd_with_feature_suite(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_scenario_descriptions(mock_client_init): +def test_bdd_scenario_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -176,7 +176,7 @@ def bdd_scenario_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_feature_descriptions(mock_client_init): +def test_bdd_feature_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -213,10 +213,14 @@ def test_bdd_failed_feature(mock_client_init): assert finish_scenario_call[1]["status"] == "FAILED" log_count = mock_client.log.call_count - # 1 - debug log from pytest-bdd's scenario module; 2 - traceback log from the agent - assert log_count == 1 + 1, 'Incorrect number of "log" calls' + # 1 - debug log from pytest-bdd's scenario module; 2 - traceback log from the agent; 3 - error log from pytest + assert log_count == 1 + 1 + 1, 'Incorrect number of "log" calls' log_call_args_list = mock_client.log.call_args_list[1:] assert log_call_args_list[0][1]["level"] == "ERROR" assert log_call_args_list[0][1]["message"].endswith("AssertionError: assert False\n") assert log_call_args_list[0][1]["item_id"] == "Given I have a failed step_1" + + assert log_call_args_list[1][1]["level"] == "ERROR" + assert log_call_args_list[1][1]["message"].endswith("AssertionError") + assert log_call_args_list[1][1]["item_id"] == "Feature: Test failed scenario - Scenario: The scenario_1" From a61490ad4b096d7755bc440c0c59e335d49a04d4 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 19 Feb 2025 13:14:35 +0300 Subject: [PATCH 068/110] A style fix --- pytest_reportportal/service.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index e015da4..7a1b6ea 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -791,7 +791,6 @@ def process_results(self, test_item: Item, report): :param test_item: pytest.Item :param report: pytest's result report """ - if report.longrepr: self.post_log(test_item, report.longreprtext, log_level="ERROR") From b465f2d0df64fc93b554c6c7d2e179b2bc10af30 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 19 Feb 2025 
15:33:00 +0300 Subject: [PATCH 069/110] Rename test --- tests/integration/test_bdd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 1279a0e..9d00e83 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -74,7 +74,7 @@ def setup_mock_for_logging(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_test_basic_bdd(mock_client_init): +def test_basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {} From 7592988169e878c0ee0fbe87d69d13b161f149d1 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 19 Feb 2025 16:18:08 +0300 Subject: [PATCH 070/110] Add more tests --- examples/bdd/features/belly.feature | 8 +++++ examples/bdd/step_defs/test_belly.py | 32 +++++++++++++++++++ tests/integration/test_bdd.py | 46 ++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 examples/bdd/features/belly.feature create mode 100644 examples/bdd/step_defs/test_belly.py diff --git a/examples/bdd/features/belly.feature b/examples/bdd/features/belly.feature new file mode 100644 index 0000000..6686183 --- /dev/null +++ b/examples/bdd/features/belly.feature @@ -0,0 +1,8 @@ +@smoke @test @feature:belly +Feature: Belly + + @ok @key:value + Scenario: a few cukes + Given I have 42 cukes in my belly + When I wait 1 hour + Then my belly should growl diff --git a/examples/bdd/step_defs/test_belly.py b/examples/bdd/step_defs/test_belly.py new file mode 100644 index 0000000..5afcd97 --- /dev/null +++ b/examples/bdd/step_defs/test_belly.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
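The `@ok @key:value` tags on the feature above are meant to surface as ReportPortal attributes: a bare tag becomes a value-only attribute, while a tag containing `:` is split once into a key/value pair, which is the conversion the next patch adds as `_process_bdd_attributes`. The rule in isolation (the function name here is made up):

```python
from typing import Dict, List

ATTRIBUTE_DELIMITER = ":"


def tags_to_attributes(tags: List[str]) -> List[Dict[str, str]]:
    # "key:value" -> keyed attribute; "smoke" -> value-only attribute.
    attributes = []
    for tag in tags:
        if ATTRIBUTE_DELIMITER in tag:
            key, value = tag.split(ATTRIBUTE_DELIMITER, 1)
            attributes.append({"key": key, "value": value})
        else:
            attributes.append({"value": tag})
    return attributes


assert tags_to_attributes(["ok", "key:value"]) == [{"value": "ok"}, {"key": "key", "value": "value"}]
```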
+ +from pytest_bdd import given, parsers, scenarios, then, when + +scenarios("../features/belly.feature") + + +@given(parsers.parse("I have {start:d} cukes in my belly"), target_fixture="cucumbers") +def given_cucumbers(start): + return {"start": start, "wait": 0} + + +@when(parsers.parse("I wait {hours:d} hour")) +def then_wait(cucumbers, hours): + cucumbers["wait"] += hours + + +@then("my belly should growl") +def assert_growl(cucumbers): + assert cucumbers["start"] == cucumbers["wait"] * 42 diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 9d00e83..d3750f4 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -224,3 +224,49 @@ def test_bdd_failed_feature(mock_client_init): assert log_call_args_list[1][1]["level"] == "ERROR" assert log_call_args_list[1][1]["message"].endswith("AssertionError") assert log_call_args_list[1][1]["item_id"] == "Feature: Test failed scenario - Scenario: The scenario_1" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_scenario_attributes(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + + variables = {} + variables.update(utils.DEFAULT_VARIABLES.items()) + test_file = "examples/bdd/step_defs/test_belly.py" + result = utils.run_pytest_tests(tests=[test_file], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[0] + scenario_attrs = scenario_call[1].get("attributes", []) + assert scenario_attrs is not None + assert len(scenario_attrs) == 2 + assert {"value": "ok"} in scenario_attrs + assert {"key": "key", "value": "value"} in scenario_attrs + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_feature_attributes(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + test_file = "examples/bdd/step_defs/test_belly.py" + result = utils.run_pytest_tests(tests=[test_file], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + feature_call = mock_client.start_test_item.call_args_list[0] + feature_attrs = feature_call[1].get("attributes", []) + assert feature_attrs is not None + assert len(feature_attrs) == 3 + assert {"value": "smoke"} in feature_attrs + assert {"value": "test"} in feature_attrs + assert {"key": "feature", "value": "belly"} in feature_attrs + + scenario_call = mock_client.start_test_item.call_args_list[1] + scenario_attrs = scenario_call[1].get("attributes", []) + assert scenario_attrs is not None + assert len(scenario_attrs) == 2 + assert {"value": "ok"} in scenario_attrs + assert {"key": "key", "value": "value"} in scenario_attrs From a444a7854e482d92d25e5b4cf84ce71eecd4ef1c Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 19 Feb 2025 16:40:30 +0300 Subject: [PATCH 071/110] Add attribute processing --- pytest_reportportal/service.py | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 7a1b6ea..b8efa91 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -81,6 +81,7 @@ ISSUE_DESCRIPTION_ID_TEMPLATE: str = " {issue_id}" PYTHON_REPLACE_REGEX = re.compile(r"\W") ALPHA_REGEX = re.compile(r"^\d+_*") +ATTRIBUTE_DELIMITER = ":" def trim_docstring(docstring: str) -> str: @@ -465,25 +466,41 @@ def 
_lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> return func(leaf) return func(leaf) + def _process_bdd_attributes(self, scenario: Union[Feature, Scenario]) -> List[Dict[str, str]]: + attributes = [] + for tag in scenario.tags: + key = None + value = tag + if ATTRIBUTE_DELIMITER in tag: + key, value = tag.split(ATTRIBUTE_DELIMITER, 1) + attribute = {"value": value} + if key: + attribute["key"] = key + attributes.append(attribute) + return attributes + def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: + item = leaf["item"] if leaf["type"] == LeafType.DIR: - code_ref = str(leaf["item"]) + code_ref = str(item) elif leaf["type"] == LeafType.FILE: - if isinstance(leaf["item"], Feature): - code_ref = str(leaf["item"].rel_filename) + if isinstance(item, Feature): + code_ref = str(item.rel_filename) else: - code_ref = str(leaf["item"].fspath) + code_ref = str(item.fspath) else: - code_ref = str(leaf["item"].fspath) + code_ref = str(item.fspath) parent_item_id = self._lock(leaf["parent"], lambda p: p.get("item_id")) if "parent" in leaf else None payload = { "name": self._truncate_item_name(leaf["name"]), - "description": self._get_item_description(leaf["item"]), + "description": self._get_item_description(item), "start_time": timestamp(), "item_type": "SUITE", "code_ref": code_ref, "parent_item_id": parent_item_id, } + if isinstance(item, Feature): + payload["attributes"] = self._process_bdd_attributes(item) return payload def _start_suite(self, suite_rq: Dict[str, Any]) -> Optional[str]: @@ -1061,6 +1078,8 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: return leaf = self._tree_path[scenario][-1] + if leaf["exec"] != ExecStatus.IN_PROGRESS: + return self._finish_step(self._build_finish_step_rq(leaf)) leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) @@ -1085,8 +1104,7 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: scenario = leaf["item"] leaf["code_ref"] = self._get_scenario_code_ref(scenario) leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) - # TODO: Add support for pytest-bdd parameters - # leaf["attributes"] = self._process_attributes(scenario) + leaf["attributes"] = self._process_bdd_attributes(scenario) @check_rp_enabled def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: From 196e9f0986a530b374d554f964f4b77d61fb1a89 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 19 Feb 2025 17:41:11 +0300 Subject: [PATCH 072/110] Add another test --- .../bdd/features/background_scenario.feature | 10 +++ examples/bdd/step_defs/test_background.py | 32 ++++++++ examples/bdd/step_defs/test_failed_step.py | 2 +- pytest_reportportal/service.py | 15 ++-- tests/integration/test_bdd.py | 80 +++++++++++++++++-- 5 files changed, 126 insertions(+), 13 deletions(-) create mode 100644 examples/bdd/features/background_scenario.feature create mode 100644 examples/bdd/step_defs/test_background.py diff --git a/examples/bdd/features/background_scenario.feature b/examples/bdd/features/background_scenario.feature new file mode 100644 index 0000000..c67e488 --- /dev/null +++ b/examples/bdd/features/background_scenario.feature @@ -0,0 +1,10 @@ +Feature: Test scenario with a background + + Background: Init our scenario + Given I have empty step + + Scenario: The first scenario + Then I have another empty step + + Scenario: The second scenario + Then I have one more empty step diff --git a/examples/bdd/step_defs/test_background.py 
b/examples/bdd/step_defs/test_background.py new file mode 100644 index 0000000..912df73 --- /dev/null +++ b/examples/bdd/step_defs/test_background.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pytest_bdd import scenarios, given, then + +scenarios("../features/background_scenario.feature") + + +@given("I have empty step") +def empty_step(): + pass + + +@then("I have another empty step") +def another_empty_step(): + pass + + +@then("I have one more empty step") +def one_more_empty_step(): + pass diff --git a/examples/bdd/step_defs/test_failed_step.py b/examples/bdd/step_defs/test_failed_step.py index d6967a9..e49b854 100644 --- a/examples/bdd/step_defs/test_failed_step.py +++ b/examples/bdd/step_defs/test_failed_step.py @@ -18,5 +18,5 @@ @given("I have a failed step") -def given_failed_step(): +def failed_step(): assert False diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index b8efa91..11672c0 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -156,7 +156,7 @@ class PyTestService: _config: AgentConfig _issue_types: Dict[str, str] _tree_path: Dict[Any, List[Dict[str, Any]]] - _bdd_root_leaf: Optional[Dict[str, Any]] + _bdd_tree: Optional[Dict[str, Any]] _bdd_item_by_name: Dict[str, Item] _bdd_scenario_by_item: Dict[Item, Scenario] _start_tracker: Set[str] @@ -173,7 +173,7 @@ def __init__(self, agent_config: AgentConfig) -> None: self._config = agent_config self._issue_types = {} self._tree_path = {} - self._bdd_root_leaf = None + self._bdd_tree = None self._bdd_item_by_name = {} self._bdd_scenario_by_item = {} self._start_tracker = set() @@ -380,6 +380,8 @@ def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separato current_name = test_tree["name"] del parent_leaf["children"][current_item] for item, child_leaf in child_items: + if child_leaf["type"] == LeafType.NESTED: + continue parent_leaf["children"][item] = child_leaf child_leaf["parent"] = parent_leaf child_leaf["name"] = current_name + separator + child_leaf["name"] @@ -1042,9 +1044,9 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: test_item = self._bdd_item_by_name.get(item_name, None) self._bdd_scenario_by_item[test_item] = scenario - root_leaf = self._bdd_root_leaf + root_leaf = self._bdd_tree if not root_leaf: - self._bdd_root_leaf = root_leaf = self._create_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) + self._bdd_tree = root_leaf = self._create_leaf(LeafType.ROOT, None, None, item_id=self.parent_item_id) children_leafs = root_leaf["children"] if feature in children_leafs: feature_leaf = children_leafs[feature] @@ -1065,7 +1067,10 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: self._remove_file_names(root_leaf) self._generate_names(root_leaf) if not self._config.rp_hierarchy_code: - self._merge_code_with_separator(root_leaf, " - ") + try: + self._merge_code_with_separator(root_leaf, " - ") + except Exception as e: 
+ LOGGER.exception(e) self._build_item_paths(root_leaf, []) def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index d3750f4..4cd534d 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -74,7 +74,7 @@ def setup_mock_for_logging(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_basic_bdd(mock_client_init): +def basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {} @@ -118,7 +118,7 @@ def test_basic_bdd(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_basic_bdd_with_feature_suite(mock_client_init): +def basic_bdd_with_feature_suite(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} @@ -155,7 +155,7 @@ def test_basic_bdd_with_feature_suite(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_scenario_descriptions(mock_client_init): +def bdd_scenario_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -176,7 +176,7 @@ def test_bdd_scenario_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_feature_descriptions(mock_client_init): +def bdd_feature_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -190,7 +190,7 @@ def test_bdd_feature_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_failed_feature(mock_client_init): +def bdd_failed_feature(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {} @@ -227,7 +227,7 @@ def test_bdd_failed_feature(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_scenario_attributes(mock_client_init): +def bdd_scenario_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -246,7 +246,7 @@ def test_bdd_scenario_attributes(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_feature_attributes(mock_client_init): +def bdd_feature_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -270,3 +270,69 @@ def test_bdd_feature_attributes(mock_client_init): assert len(scenario_attrs) == 2 assert {"value": "ok"} in scenario_attrs assert {"key": "key", "value": "value"} in scenario_attrs + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_background_step(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + + variables = {} + variables.update(utils.DEFAULT_VARIABLES.items()) + test_file = "examples/bdd/step_defs/test_background.py" + result = utils.run_pytest_tests(tests=[test_file], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify the first scenario + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert scenario_call_1[1]["name"] == "Feature: Test scenario with a background - Scenario: The first scenario" + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + + # Verify the Background step for the first scenario + background_call_1 = mock_client.start_test_item.call_args_list[1] + assert 
background_call_1[0][0] == "Background: Init our scenario"
+    assert background_call_1[1]["item_type"] == "STEP"
+    assert background_call_1[1]["has_stats"] is False
+    assert background_call_1[1]["parent_item_id"] == scenario_call_1[0][0]
+
+    # Verify the nested steps within the Background for the first scenario
+    nested_step_call_1 = mock_client.start_test_item.call_args_list[2]
+    assert nested_step_call_1[0][0] == "Given I have empty step"
+    assert nested_step_call_1[1]["item_type"] == "STEP"
+    assert nested_step_call_1[1]["parent_item_id"] == background_call_1[0][0]
+    assert nested_step_call_1[1]["has_stats"] is False
+
+    # Verify the step within the first scenario
+    scenario_step_call_1 = mock_client.start_test_item.call_args_list[3]
+    assert scenario_step_call_1[0][0] == "Then I have another empty step"
+    assert scenario_step_call_1[1]["item_type"] == "STEP"
+    assert scenario_step_call_1[1]["parent_item_id"] == scenario_call_1[0][0]
+    assert scenario_step_call_1[1]["has_stats"] is False
+
+    # Verify the second scenario
+    scenario_call_2 = mock_client.start_test_item.call_args_list[4]
+    assert scenario_call_2[0][0] == "Feature: Test scenario with a background - Scenario: The second scenario"
+    assert scenario_call_2[1]["item_type"] == "STEP"
+    assert scenario_call_2[1].get("has_stats", True)
+
+    # Verify the Background step for the second scenario
+    background_call_2 = mock_client.start_test_item.call_args_list[5]
+    assert background_call_2[0][0] == "Background: Init our scenario"
+    assert background_call_2[1]["item_type"] == "STEP"
+    assert background_call_2[1]["has_stats"] is False
+    assert background_call_2[1]["parent_item_id"] == scenario_call_2[0][0]
+
+    # Verify the nested steps within the Background for the second scenario
+    nested_step_call_2 = mock_client.start_test_item.call_args_list[6]
+    assert nested_step_call_2[0][0] == "Given I have empty step"
+    assert nested_step_call_2[1]["item_type"] == "STEP"
+    assert nested_step_call_2[1]["parent_item_id"] == background_call_2[0][0]
+    assert nested_step_call_2[1]["has_stats"] is False
+
+    # Verify the step within the second scenario
+    scenario_step_call_2 = mock_client.start_test_item.call_args_list[7]
+    assert scenario_step_call_2[0][0] == "Then I have one more empty step"
+    assert scenario_step_call_2[1]["item_type"] == "STEP"
+    assert scenario_step_call_2[1]["parent_item_id"] == scenario_call_2[0][0]
+    assert scenario_step_call_2[1]["has_stats"] is False

From ef2ae98dad2bb3e64365d401362cd2ea9998c065 Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Wed, 19 Feb 2025 17:41:25 +0300
Subject: [PATCH 073/110] Add another test

---
 examples/bdd/step_defs/test_background.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/bdd/step_defs/test_background.py b/examples/bdd/step_defs/test_background.py
index 912df73..fff783d 100644
--- a/examples/bdd/step_defs/test_background.py
+++ b/examples/bdd/step_defs/test_background.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from pytest_bdd import scenarios, given, then +from pytest_bdd import given, scenarios, then scenarios("../features/background_scenario.feature") From ec4ff64952be72e04c082dfbf4dcbe4a5adb9c3a Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 20 Feb 2025 18:02:17 +0300 Subject: [PATCH 074/110] Pytest BDD implementation: Backgrounds: WIP --- pytest_reportportal/service.py | 69 +++++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 11672c0..22ef982 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -52,12 +52,14 @@ from _pytest.mark import Mark try: # noinspection PyPackageRequirements + from pytest_bdd.parser import Background, Feature, Scenario, ScenarioTemplate, Step + # noinspection PyPackageRequirements - from pytest_bdd.parser import Feature, Scenario, ScenarioTemplate, Step from pytest_bdd.scenario import make_python_name PYTEST_BDD = True except ImportError: + Background = type("dummy", (), {}) Feature = type("dummy", (), {}) Scenario = type("dummy", (), {}) ScenarioTemplate = type("dummy", (), {}) @@ -82,6 +84,7 @@ PYTHON_REPLACE_REGEX = re.compile(r"\W") ALPHA_REGEX = re.compile(r"^\d+_*") ATTRIBUTE_DELIMITER = ":" +BACKGROUND_STEP_NAME = "Background" def trim_docstring(docstring: str) -> str: @@ -374,17 +377,18 @@ def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separato if test_tree["type"] not in leaf_types: for item, child_leaf in child_items: self._merge_leaf_types(child_leaf, leaf_types, separator) - elif len(test_tree["children"].items()) > 0: + elif len(child_items) > 0: parent_leaf = test_tree["parent"] current_item = test_tree["item"] current_name = test_tree["name"] - del parent_leaf["children"][current_item] + child_types = [child_leaf["type"] in leaf_types for _, child_leaf in child_items] + if all(child_types): + del parent_leaf["children"][current_item] for item, child_leaf in child_items: - if child_leaf["type"] == LeafType.NESTED: - continue - parent_leaf["children"][item] = child_leaf - child_leaf["parent"] = parent_leaf - child_leaf["name"] = current_name + separator + child_leaf["name"] + if all(child_types): + parent_leaf["children"][item] = child_leaf + child_leaf["parent"] = parent_leaf + child_leaf["name"] = current_name + separator + child_leaf["name"] self._merge_leaf_types(child_leaf, leaf_types, separator) def _merge_dirs(self, test_tree: Dict[str, Any]) -> None: @@ -402,7 +406,7 @@ def _build_item_paths(self, leaf: Dict[str, Any], path: List[Dict[str, Any]]) -> for name, child_leaf in leaf["children"].items(): self._build_item_paths(child_leaf, path) path.pop() - elif leaf["type"] != LeafType.ROOT: + if leaf["type"] != LeafType.ROOT: self._tree_path[leaf["item"]] = path + [leaf] @check_rp_enabled @@ -1064,6 +1068,12 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: children_leafs = rule_leaf["children"] scenario_leaf = self._create_leaf(LeafType.CODE, rule_leaf, scenario) children_leafs[scenario] = scenario_leaf + children_leafs = scenario_leaf["children"] + background = feature.background + if background: + if background not in children_leafs: + background_leaf = self._create_leaf(LeafType.NESTED, rule_leaf, background) + children_leafs[background] = background_leaf self._remove_file_names(root_leaf) self._generate_names(root_leaf) if not self._config.rp_hierarchy_code: @@ -1111,6 +1121,15 @@ def _process_scenario_metadata(self, leaf: Dict[str, 
Any]) -> None: leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) leaf["attributes"] = self._process_bdd_attributes(scenario) + def _finish_bdd_step(self, leaf: Dict[str, Any], status: str) -> None: + if leaf["exec"] != ExecStatus.IN_PROGRESS: + return + + reporter = self.rp.step_reporter + item_id = leaf["item_id"] + reporter.finish_nested_step(item_id, timestamp(), status) + leaf["exec"] = ExecStatus.FINISHED + @check_rp_enabled def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: """Start BDD step. @@ -1129,21 +1148,23 @@ def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> No scenario_leaf["item_id"] = self._start_step(self._build_start_step_rq(scenario_leaf)) scenario_leaf["exec"] = ExecStatus.IN_PROGRESS reporter = self.rp.step_reporter - item_id = reporter.start_nested_step(f"{step.keyword} {step.name}", timestamp()) step_leaf = self._create_leaf(LeafType.NESTED, scenario_leaf, step) - scenario_leaf["children"][step] = step_leaf + if step.background: + background_leaf = scenario_leaf["children"][step.background] + background_leaf["children"][step] = step_leaf + if background_leaf["exec"] != ExecStatus.IN_PROGRESS: + item_id = reporter.start_nested_step(BACKGROUND_STEP_NAME, timestamp()) + background_leaf["item_id"] = item_id + background_leaf["exec"] = ExecStatus.IN_PROGRESS + else: + scenario_leaf["children"][step] = step_leaf + if feature.background: + background_leaf = scenario_leaf["children"][feature.background] + self._finish_bdd_step(background_leaf, "PASSED") + item_id = reporter.start_nested_step(f"{step.keyword} {step.name}", timestamp()) step_leaf["item_id"] = item_id step_leaf["exec"] = ExecStatus.IN_PROGRESS - def _finish_bdd_step(self, leaf: Dict[str, Any], status: str) -> None: - if leaf["exec"] != ExecStatus.IN_PROGRESS: - return - - reporter = self.rp.step_reporter - item_id = leaf["item_id"] - reporter.finish_nested_step(item_id, timestamp(), status) - leaf["exec"] = ExecStatus.FINISHED - @check_rp_enabled def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: """Finish BDD step. 
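# A sketch of the item tree the Background handling above produces, using the
# step names from the example feature files in this series (the layout is an
# illustration, not the agent's literal output):
#
#   Scenario (regular test item, has_stats=True)
#   |-- "Background" nested step, has_stats=False; started lazily with the
#   |   first background step and finished as PASSED once the first
#   |   non-background step of the scenario starts
#   |   `-- "Given I have empty step" (nested step)
#   `-- "Then I have another empty step" (nested step)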
@@ -1173,7 +1194,10 @@ def finish_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step scenario_leaf = self._tree_path[scenario][-1] scenario_leaf["status"] = "FAILED" - step_leaf = scenario_leaf["children"][step] + if step.background: + step_leaf = scenario_leaf["children"][step.background]["children"][step] + else: + step_leaf = scenario_leaf["children"][step] item_id = step_leaf["item_id"] traceback_str = "\n".join( traceback.format_exception(type(exception), value=exception, tb=exception.__traceback__) @@ -1183,6 +1207,9 @@ def finish_bdd_step_error(self, feature: Feature, scenario: Scenario, step: Step client.log(**exception_log) self._finish_bdd_step(step_leaf, "FAILED") + if step.background: + background_leaf = scenario_leaf["children"][step.background] + self._finish_bdd_step(background_leaf, "FAILED") def start(self) -> None: """Start servicing Report Portal requests.""" From f958ef1fcc22655ae12118ae056831749e3f45d3 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 20 Feb 2025 18:41:44 +0300 Subject: [PATCH 075/110] Pytest BDD implementation: finish Backgrounds --- pytest_reportportal/service.py | 28 +++++++++++++++++++++++--- tests/integration/test_bdd.py | 36 ++++++++++++++++++---------------- 2 files changed, 44 insertions(+), 20 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 22ef982..d956bd7 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1149,8 +1149,17 @@ def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> No scenario_leaf["exec"] = ExecStatus.IN_PROGRESS reporter = self.rp.step_reporter step_leaf = self._create_leaf(LeafType.NESTED, scenario_leaf, step) - if step.background: - background_leaf = scenario_leaf["children"][step.background] + background_steps = [] + if feature.background: + background_steps = feature.background.steps + if next( + filter( + lambda s: s.name == step.name and s.keyword == step.keyword and s.line_number == step.line_number, + background_steps, + ), + None, + ): + background_leaf = scenario_leaf["children"][feature.background] background_leaf["children"][step] = step_leaf if background_leaf["exec"] != ExecStatus.IN_PROGRESS: item_id = reporter.start_nested_step(BACKGROUND_STEP_NAME, timestamp()) @@ -1177,7 +1186,20 @@ def finish_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> N return scenario_leaf = self._tree_path[scenario][-1] - step_leaf = scenario_leaf["children"][step] + background_steps = [] + if feature.background: + background_steps = feature.background.steps + if next( + filter( + lambda s: s.name == step.name and s.keyword == step.keyword and s.line_number == step.line_number, + background_steps, + ), + None, + ): + parent_leaf = scenario_leaf["children"][feature.background] + else: + parent_leaf = scenario_leaf + step_leaf = parent_leaf["children"][step] self._finish_bdd_step(step_leaf, "PASSED") @check_rp_enabled diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 4cd534d..3b88299 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -34,8 +34,10 @@ def generate_item_id(*args, **kwargs) -> str: name = args[0] else: name = kwargs["name"] - ITEM_ID_DICT[name] += 1 - item_id = f"{name}_{ITEM_ID_DICT[name]}" + count = ITEM_ID_DICT[name] + count += 1 + ITEM_ID_DICT[name] = count + item_id = f"{name}_{count}" ITEM_ID_LIST.append(item_id) return item_id @@ -291,48 +293,48 @@ def test_bdd_background_step(mock_client_init): # 
Verify the Background step for the first scenario
     background_call_1 = mock_client.start_test_item.call_args_list[1]
-    assert background_call_1[0][0] == "Background: Init our scenario"
-    assert background_call_1[1]["item_type"] == "STEP"
+    assert background_call_1[0][0] == "Background"
+    assert background_call_1[0][2] == "step"
     assert background_call_1[1]["has_stats"] is False
-    assert background_call_1[1]["parent_item_id"] == scenario_call_1[0][0]
+    assert background_call_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1"
 
     # Verify the nested steps within the Background for the first scenario
     nested_step_call_1 = mock_client.start_test_item.call_args_list[2]
     assert nested_step_call_1[0][0] == "Given I have empty step"
-    assert nested_step_call_1[1]["item_type"] == "STEP"
-    assert nested_step_call_1[1]["parent_item_id"] == background_call_1[0][0]
+    assert nested_step_call_1[0][2] == "step"
+    assert nested_step_call_1[1]["parent_item_id"] == background_call_1[0][0] + "_1"
     assert nested_step_call_1[1]["has_stats"] is False
 
     # Verify the step within the first scenario
     scenario_step_call_1 = mock_client.start_test_item.call_args_list[3]
     assert scenario_step_call_1[0][0] == "Then I have another empty step"
-    assert scenario_step_call_1[1]["item_type"] == "STEP"
-    assert scenario_step_call_1[1]["parent_item_id"] == scenario_call_1[0][0]
+    assert scenario_step_call_1[0][2] == "step"
+    assert scenario_step_call_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1"
     assert scenario_step_call_1[1]["has_stats"] is False
 
     # Verify the second scenario
     scenario_call_2 = mock_client.start_test_item.call_args_list[4]
-    assert scenario_call_2[0][0] == "Feature: Test scenario with a background - Scenario: The second scenario"
+    assert scenario_call_2[1]["name"] == "Feature: Test scenario with a background - Scenario: The second scenario"
     assert scenario_call_2[1]["item_type"] == "STEP"
     assert scenario_call_2[1].get("has_stats", True)
 
     # Verify the Background step for the second scenario
     background_call_2 = mock_client.start_test_item.call_args_list[5]
-    assert background_call_2[0][0] == "Background: Init our scenario"
-    assert background_call_2[1]["item_type"] == "STEP"
+    assert background_call_2[0][0] == "Background"
+    assert background_call_2[0][2] == "step"
     assert background_call_2[1]["has_stats"] is False
-    assert background_call_2[1]["parent_item_id"] == scenario_call_2[0][0]
+    assert background_call_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_1"
 
     # Verify the nested steps within the Background for the second scenario
     nested_step_call_2 = mock_client.start_test_item.call_args_list[6]
     assert nested_step_call_2[0][0] == "Given I have empty step"
-    assert nested_step_call_2[1]["item_type"] == "STEP"
-    assert nested_step_call_2[1]["parent_item_id"] == background_call_2[0][0]
+    assert nested_step_call_2[0][2] == "step"
+    assert nested_step_call_2[1]["parent_item_id"] == background_call_2[0][0] + "_2"
     assert nested_step_call_2[1]["has_stats"] is False
 
     # Verify the step within the second scenario
     scenario_step_call_2 = mock_client.start_test_item.call_args_list[7]
     assert scenario_step_call_2[0][0] == "Then I have one more empty step"
-    assert scenario_step_call_2[1]["item_type"] == "STEP"
-    assert scenario_step_call_2[1]["parent_item_id"] == scenario_call_2[0][0]
+    assert scenario_step_call_2[0][2] == "step"
+    assert scenario_step_call_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_1"
     assert scenario_step_call_2[1]["has_stats"] is False

From 
611789560f4bf64d8a806a7de32b3c44208f0178 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 21 Feb 2025 14:36:54 +0300 Subject: [PATCH 076/110] Fix item merge --- pytest_reportportal/service.py | 2 +- tests/integration/test_fixtures.py | 35 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index d956bd7..f049ee0 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -392,7 +392,7 @@ def _merge_leaf_types(self, test_tree: Dict[str, Any], leaf_types: Set, separato self._merge_leaf_types(child_leaf, leaf_types, separator) def _merge_dirs(self, test_tree: Dict[str, Any]) -> None: - self._merge_leaf_types(test_tree, {LeafType.DIR}, self._config.rp_dir_path_separator) + self._merge_leaf_types(test_tree, {LeafType.DIR, LeafType.FILE}, self._config.rp_dir_path_separator) def _merge_code_with_separator(self, test_tree: Dict[str, Any], separator: str) -> None: self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, separator) diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index df01654..134cd26 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -92,9 +92,8 @@ def test_fixture_on_off(mock_client_init, switch): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count expected_count = 3 if switch else 1 - assert ( - start_count == finish_count == expected_count - ), 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == expected_count, 'Incorrect number of "start_test_item" calls' + assert finish_count == expected_count, 'Incorrect number of "finish_test_item" calls' def run_tests(test_path, should_fail=False): @@ -116,7 +115,8 @@ def test_fixture_setup(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -153,7 +153,8 @@ def test_fixture_teardown(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -203,7 +204,8 @@ def test_fixture_setup_failure(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 2, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 2, 'Incorrect number of "start_test_item" calls' + assert finish_count == 2, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -252,7 +254,8 @@ def test_fixture_teardown_failure(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count 
= mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -309,7 +312,8 @@ def test_fixture_yield_none(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -347,7 +351,8 @@ def test_fixture_return_none(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -385,7 +390,8 @@ def test_failure_fixture_teardown(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 3, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 3, 'Incorrect number of "start_test_item" calls' + assert finish_count == 3, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -442,7 +448,8 @@ def test_session_fixture_setup(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 4, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 4, 'Incorrect number of "start_test_item" calls' + assert finish_count == 4, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -496,7 +503,8 @@ def test_module_fixture_setup(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 4, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 4, 'Incorrect number of "start_test_item" calls' + assert finish_count == 4, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] @@ -523,7 +531,8 @@ def test_class_fixture_setup(mock_client_init): start_count = mock_client.start_test_item.call_count finish_count = mock_client.finish_test_item.call_count - assert start_count == finish_count == 8, 'Incorrect number of "start_test_item" or "finish_test_item" calls' + assert start_count == 8, 'Incorrect number of "start_test_item" calls' + assert finish_count == 8, 'Incorrect number of "finish_test_item" calls' call_args = mock_client.start_test_item.call_args_list setup_call_args = call_args[1][0] From 
bee4fd2fc4a98ad9b8db7a743f1dc2acbb9b9539 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 21 Feb 2025 14:54:09 +0300 Subject: [PATCH 077/110] Fix item merge --- pytest_reportportal/service.py | 2 +- tests/integration/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index f049ee0..e399581 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -395,7 +395,7 @@ def _merge_dirs(self, test_tree: Dict[str, Any]) -> None: self._merge_leaf_types(test_tree, {LeafType.DIR, LeafType.FILE}, self._config.rp_dir_path_separator) def _merge_code_with_separator(self, test_tree: Dict[str, Any], separator: str) -> None: - self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE}, separator) + self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE, LeafType.DIR}, separator) def _merge_code(self, test_tree: Dict[str, Any]) -> None: self._merge_code_with_separator(test_tree, "::") diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5895d96..4a238f7 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -264,7 +264,7 @@ "parent_item_id": lambda x: x is None, } ], - [{"name": "examples/test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], + [{"name": "examples::test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], [{"name": "test_simple", "item_type": "STEP", "parent_item_id": lambda x: x is None}], ] From e0d2d2a9cfdc73114a775b8f435461c46518be09 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 21 Feb 2025 14:57:01 +0300 Subject: [PATCH 078/110] Unignore tests --- tests/integration/test_bdd.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 3b88299..4bf5e5c 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -76,7 +76,7 @@ def setup_mock_for_logging(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def basic_bdd(mock_client_init): +def test_basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {} @@ -120,7 +120,7 @@ def basic_bdd(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def basic_bdd_with_feature_suite(mock_client_init): +def test_basic_bdd_with_feature_suite(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} @@ -157,7 +157,7 @@ def basic_bdd_with_feature_suite(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_scenario_descriptions(mock_client_init): +def test_bdd_scenario_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -178,7 +178,7 @@ def bdd_scenario_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_feature_descriptions(mock_client_init): +def test_bdd_feature_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -192,7 +192,7 @@ def bdd_feature_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_failed_feature(mock_client_init): +def test_bdd_failed_feature(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {} 
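# pytest only collects callables whose names match its `python_functions`
# setting (names starting with `test` by default), so dropping the prefix in
# an earlier patch quietly excluded these cases, and the renames in this patch
# bring them back. A more explicit way to park a work-in-progress test is
# pytest's skip marker; a sketch (the reason text is illustrative):
#
#   import pytest
#
#   @pytest.mark.skip(reason="backgrounds support still in progress")
#   def test_basic_bdd(mock_client_init): ...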
@@ -229,7 +229,7 @@ def bdd_failed_feature(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_scenario_attributes(mock_client_init): +def test_bdd_scenario_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -248,7 +248,7 @@ def bdd_scenario_attributes(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def bdd_feature_attributes(mock_client_init): +def test_bdd_feature_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) From 9eec76a9439868aea36bde4a878f8b21c407c652 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 21 Feb 2025 15:15:34 +0300 Subject: [PATCH 079/110] Add another test --- .../bdd/features/background_two_steps.feature | 7 +++ .../step_defs/test_background_two_steps.py | 35 +++++++++++++ tests/integration/test_bdd.py | 52 +++++++++++++++++++ 3 files changed, 94 insertions(+) create mode 100644 examples/bdd/features/background_two_steps.feature create mode 100644 examples/bdd/step_defs/test_background_two_steps.py diff --git a/examples/bdd/features/background_two_steps.feature b/examples/bdd/features/background_two_steps.feature new file mode 100644 index 0000000..87075ea --- /dev/null +++ b/examples/bdd/features/background_two_steps.feature @@ -0,0 +1,7 @@ +Feature: Test scenario with a background with two steps + Background: + Given I have first empty step + And I have second empty step + + Scenario: The scenario + Then I have main step diff --git a/examples/bdd/step_defs/test_background_two_steps.py b/examples/bdd/step_defs/test_background_two_steps.py new file mode 100644 index 0000000..ad81b76 --- /dev/null +++ b/examples/bdd/step_defs/test_background_two_steps.py @@ -0,0 +1,35 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pytest_bdd import given, scenarios, then + +scenarios("../features/background_two_steps.feature") + + +@given("I have first empty step") +def first_empty_step(): + """First empty step implementation.""" + pass + + +@given("I have second empty step") +def second_empty_step(): + """Second empty step implementation.""" + pass + + +@then("I have main step") +def main_step(): + """Main step implementation.""" + pass diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 4bf5e5c..de573b3 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -30,6 +30,8 @@ def generate_item_id(*args, **kwargs) -> str: + global ITEM_ID_DICT + global ITEM_ID_LIST if args: name = args[0] else: @@ -43,11 +45,13 @@ def generate_item_id(*args, **kwargs) -> str: def get_last_item_id() -> Optional[str]: + global ITEM_ID_LIST if len(ITEM_ID_LIST) > 0: return ITEM_ID_LIST[-1] def remove_last_item_id(*_, **__) -> Optional[str]: + global ITEM_ID_LIST if len(ITEM_ID_LIST) > 0: return ITEM_ID_LIST.pop() @@ -338,3 +342,51 @@ def test_bdd_background_step(mock_client_init): assert scenario_step_call_2[0][2] == "step" assert scenario_step_call_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_1" assert scenario_step_call_2[1]["has_stats"] is False + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_background_two_steps(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + + variables = {} + variables.update(utils.DEFAULT_VARIABLES.items()) + test_file = "examples/bdd/step_defs/test_background_two_steps.py" + result = utils.run_pytest_tests(tests=[test_file], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify the scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] == "Feature: Test scenario with a background with two steps - Scenario: The scenario" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1].get("has_stats", True) + + # Verify the Background step + background_call = mock_client.start_test_item.call_args_list[1] + assert background_call[0][0] == "Background" + assert background_call[0][2] == "step" + assert background_call[1]["has_stats"] is False + assert background_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + + # Verify the first nested step within the Background + nested_step_call_1 = mock_client.start_test_item.call_args_list[2] + assert nested_step_call_1[0][0] == "Given I have first empty step" + assert nested_step_call_1[0][2] == "step" + assert nested_step_call_1[1]["parent_item_id"] == background_call[0][0] + "_3" + assert nested_step_call_1[1]["has_stats"] is False + + # Verify the second nested step within the Background + nested_step_call_2 = mock_client.start_test_item.call_args_list[3] + assert nested_step_call_2[0][0] == "And I have second empty step" + assert nested_step_call_2[0][2] == "step" + assert nested_step_call_2[1]["parent_item_id"] == background_call[0][0] + "_3" + assert nested_step_call_2[1]["has_stats"] is False + + # Verify the scenario step + scenario_step_call = mock_client.start_test_item.call_args_list[4] + assert scenario_step_call[0][0] == "Then I have main step" + assert scenario_step_call[0][2] == "step" + assert scenario_step_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert scenario_step_call[1]["has_stats"] is False From bacf9fdb91b1065cd87fb45968962fdc3471545d Mon Sep 17 00:00:00 2001 From: 
Vadzim Hushchanskou Date: Fri, 21 Feb 2025 18:11:49 +0300 Subject: [PATCH 080/110] Pytest BDD implementation: Rule keyword: WIP --- examples/bdd/step_defs/test_rule_steps.py | 43 +++++++++++++++++++++++ pytest_reportportal/service.py | 35 +++++++++++++++--- tests/integration/test_bdd.py | 8 +++++ 3 files changed, 81 insertions(+), 5 deletions(-) create mode 100644 examples/bdd/step_defs/test_rule_steps.py diff --git a/examples/bdd/step_defs/test_rule_steps.py b/examples/bdd/step_defs/test_rule_steps.py new file mode 100644 index 0000000..e01d308 --- /dev/null +++ b/examples/bdd/step_defs/test_rule_steps.py @@ -0,0 +1,43 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Rule keyword test module.""" +from pytest_bdd import given, scenarios, then + + +scenarios("../features/rule_keyword.feature") + + +@given("I have empty step") +def empty_step(): + """Empty step implementation.""" + pass + + +@then("I have another empty step") +def another_empty_step(): + """Another empty step implementation.""" + pass + + +@then("I have one more empty step") +def one_more_empty_step(): + """One more empty step implementation.""" + pass + + +@then("I have one more else empty step") +def one_more_else_empty_step(): + """One more else empty step implementation.""" + pass diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index e399581..1885acc 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -67,6 +67,12 @@ make_python_name: Callable[[str], str] = lambda x: x PYTEST_BDD = False +try: + # noinspection PyPackageRequirements + from pytest_bdd.parser import Rule +except ImportError: + Rule = type("dummy", (), {}) # Old pytest-bdd versions do not have Rule + from reportportal_client import RP, create_client from reportportal_client.helpers import dict_to_payload, gen_attributes, get_launch_sys_attrs, get_package_version @@ -366,9 +372,18 @@ def _generate_names(self, test_tree: Dict[str, Any]) -> None: elif isinstance(item, Scenario): keyword = getattr(item, "keyword", "Scenario") test_tree["name"] = f"{keyword}: {item.name}" + elif isinstance(item, Rule): + keyword = getattr(item, "keyword", "Rule") + test_tree["name"] = f"{keyword}: {item.name}" else: test_tree["name"] = item.name + if test_tree["type"] == LeafType.SUITE: + item = test_tree["item"] + if isinstance(item, Rule): + keyword = getattr(item, "keyword", "Rule") + test_tree["name"] = f"{keyword}: {item.name}" + for item, child_leaf in test_tree["children"].items(): self._generate_names(child_leaf) @@ -395,7 +410,7 @@ def _merge_dirs(self, test_tree: Dict[str, Any]) -> None: self._merge_leaf_types(test_tree, {LeafType.DIR, LeafType.FILE}, self._config.rp_dir_path_separator) def _merge_code_with_separator(self, test_tree: Dict[str, Any], separator: str) -> None: - self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE, LeafType.DIR}, separator) + self._merge_leaf_types(test_tree, {LeafType.CODE, LeafType.FILE, LeafType.DIR, LeafType.SUITE}, separator) 
def _merge_code(self, test_tree: Dict[str, Any]) -> None: self._merge_code_with_separator(test_tree, "::") @@ -454,7 +469,7 @@ def _get_item_description(self, test_item: Any) -> Optional[str]: return trim_docstring(doc) if isinstance(test_item, DoctestItem): return test_item.reportinfo()[2] - if isinstance(test_item, (Feature, Scenario, ScenarioTemplate)): + if isinstance(test_item, (Feature, Scenario, ScenarioTemplate, Rule)): description = test_item.description if description: return description @@ -472,7 +487,7 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> return func(leaf) return func(leaf) - def _process_bdd_attributes(self, scenario: Union[Feature, Scenario]) -> List[Dict[str, str]]: + def _process_bdd_attributes(self, scenario: Union[Feature, Scenario, Rule]) -> List[Dict[str, str]]: attributes = [] for tag in scenario.tags: key = None @@ -485,7 +500,7 @@ def _process_bdd_attributes(self, scenario: Union[Feature, Scenario]) -> List[Di attributes.append(attribute) return attributes - def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: + def _get_suite_code_ref(self, leaf: Dict[str, Any]) -> str: item = leaf["item"] if leaf["type"] == LeafType.DIR: code_ref = str(item) @@ -494,9 +509,16 @@ def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: code_ref = str(item.rel_filename) else: code_ref = str(item.fspath) + elif leaf["type"] == LeafType.SUITE: + code_ref = self._get_suite_code_ref(leaf["parent"]) + f"/[{type(item).__name__}:{item.name}]" else: code_ref = str(item.fspath) + return code_ref + + def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: + code_ref = self._get_suite_code_ref(leaf) parent_item_id = self._lock(leaf["parent"], lambda p: p.get("item_id")) if "parent" in leaf else None + item = leaf["item"] payload = { "name": self._truncate_item_name(leaf["name"]), "description": self._get_item_description(item), @@ -505,7 +527,7 @@ def _build_start_suite_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: "code_ref": code_ref, "parent_item_id": parent_item_id, } - if isinstance(item, Feature): + if isinstance(item, (Feature, Scenario, Rule)): payload["attributes"] = self._process_bdd_attributes(item) return payload @@ -1057,12 +1079,14 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: else: feature_leaf = self._create_leaf(LeafType.FILE, root_leaf, feature) children_leafs[feature] = feature_leaf + children_leafs = feature_leaf["children"] rule = getattr(scenario, "rule", None) if rule: if rule in children_leafs: rule_leaf = children_leafs[rule] else: rule_leaf = self._create_leaf(LeafType.SUITE, feature_leaf, rule) + children_leafs[rule] = rule_leaf else: rule_leaf = feature_leaf children_leafs = rule_leaf["children"] @@ -1074,6 +1098,7 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: if background not in children_leafs: background_leaf = self._create_leaf(LeafType.NESTED, rule_leaf, background) children_leafs[background] = background_leaf + self._remove_file_names(root_leaf) self._generate_names(root_leaf) if not self._config.rp_hierarchy_code: diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index de573b3..bf71148 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -390,3 +390,11 @@ def test_bdd_background_two_steps(mock_client_init): assert scenario_step_call[0][2] == "step" assert scenario_step_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" assert 
scenario_step_call[1]["has_stats"] is False + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_rule(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" From 5b2d6507dcf4b8f64396f2bdcf2059d3792d4f47 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 21 Feb 2025 18:12:00 +0300 Subject: [PATCH 081/110] Pytest BDD implementation: Rule keyword: WIP --- examples/bdd/features/rule_keyword.feature | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 examples/bdd/features/rule_keyword.feature diff --git a/examples/bdd/features/rule_keyword.feature b/examples/bdd/features/rule_keyword.feature new file mode 100644 index 0000000..0b37315 --- /dev/null +++ b/examples/bdd/features/rule_keyword.feature @@ -0,0 +1,15 @@ +Feature: Test rule keyword + + Rule: The first rule + Scenario: The first scenario + Given I have empty step + Then I have another empty step + + Scenario: The second scenario + Given I have empty step + Then I have one more empty step + + Rule: The second rule + Scenario: The third scenario + Given I have empty step + Then I have one more else empty step From f4b9c969400413123528c9759b61c9552f7d72f1 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Mon, 24 Feb 2025 12:01:43 +0300 Subject: [PATCH 082/110] Fix tests --- tests/integration/test_bdd.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index bf71148..80c1a06 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -17,6 +17,7 @@ from typing import Optional from unittest import mock +import pytest from reportportal_client import set_current from reportportal_client.steps import StepReporter @@ -392,6 +393,7 @@ def test_bdd_background_two_steps(mock_client_init): assert scenario_step_call[1]["has_stats"] is False +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_bdd_rule(mock_client_init): mock_client = setup_mock(mock_client_init) From e46ca72e24e8e44e663c160026567276a5eec53c Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Mon, 24 Feb 2025 14:38:00 +0300 Subject: [PATCH 083/110] Add more tests --- tests/integration/test_bdd.py | 181 ++++++++++++++++++++++++++++++---- 1 file changed, 161 insertions(+), 20 deletions(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 80c1a06..59fb01d 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -84,9 +84,7 @@ def setup_mock_for_logging(mock_client_init): def test_basic_bdd(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) - variables = {} - variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" assert mock_client.start_test_item.call_count == 5, 'There should be exactly five "start_test_item" calls' @@ -164,11 +162,7 @@ def test_basic_bdd_with_feature_suite(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_bdd_scenario_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) - variables = {} 
- variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests( - tests=["examples/bdd/step_defs/test_arguments_description.py"], variables=variables - ) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments_description.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" code_ref = "features/arguments_four_steps_description.feature/[SCENARIO:Arguments for given, when, and, then]" @@ -200,9 +194,7 @@ def test_bdd_feature_descriptions(mock_client_init): def test_bdd_failed_feature(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) - variables = {} - variables.update(utils.DEFAULT_VARIABLES.items()) - result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_failed_step.py"], variables=variables) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_failed_step.py"]) assert int(result) == 1, "Exit code should be 1 (test error)" assert mock_client.start_test_item.call_count == 2, 'There should be exactly two "start_test_item" calls' @@ -238,10 +230,8 @@ def test_bdd_scenario_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) - variables = {} - variables.update(utils.DEFAULT_VARIABLES.items()) test_file = "examples/bdd/step_defs/test_belly.py" - result = utils.run_pytest_tests(tests=[test_file], variables=variables) + result = utils.run_pytest_tests(tests=[test_file]) assert int(result) == 0, "Exit code should be 0 (no errors)" scenario_call = mock_client.start_test_item.call_args_list[0] @@ -284,10 +274,8 @@ def test_bdd_background_step(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) - variables = {} - variables.update(utils.DEFAULT_VARIABLES.items()) test_file = "examples/bdd/step_defs/test_background.py" - result = utils.run_pytest_tests(tests=[test_file], variables=variables) + result = utils.run_pytest_tests(tests=[test_file]) assert int(result) == 0, "Exit code should be 0 (no errors)" # Verify the first scenario @@ -350,10 +338,8 @@ def test_bdd_background_two_steps(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) - variables = {} - variables.update(utils.DEFAULT_VARIABLES.items()) test_file = "examples/bdd/step_defs/test_background_two_steps.py" - result = utils.run_pytest_tests(tests=[test_file], variables=variables) + result = utils.run_pytest_tests(tests=[test_file]) assert int(result) == 0, "Exit code should be 0 (no errors)" # Verify the scenario @@ -400,3 +386,158 @@ def test_bdd_rule(mock_client_init): setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario from first rule + scenario_1_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_1_call[1]["name"] + == "Feature: Test rule keyword - Rule: The first rule - Scenario: The first scenario" + ) + assert scenario_1_call[1]["item_type"] == "STEP" + assert scenario_1_call[1].get("has_stats", True) is True + assert scenario_1_call[1]["parent_item_id"] is None + assert ( + scenario_1_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The first scenario]" + ) + + # Verify first scenario steps + step_1_given = mock_client.start_test_item.call_args_list[1] + assert step_1_given[0][0] == "Given I have empty step" + 
assert step_1_given[0][2] == "step" + assert step_1_given[1]["parent_item_id"] == scenario_1_call[1]["name"] + "_1" + assert step_1_given[1]["has_stats"] is False + + step_1_then = mock_client.start_test_item.call_args_list[2] + assert step_1_then[0][0] == "Then I have another empty step" + assert step_1_then[0][2] == "step" + assert step_1_then[1]["parent_item_id"] == scenario_1_call[1]["name"] + "_1" + assert step_1_then[1]["has_stats"] is False + + # Verify second scenario from first rule + scenario_2_call = mock_client.start_test_item.call_args_list[3] + assert ( + scenario_2_call[1]["name"] + == "Feature: Test rule keyword - Rule: The first rule - Scenario: The second scenario" + ) + assert scenario_2_call[1]["item_type"] == "STEP" + assert scenario_2_call[1].get("has_stats", True) is True + assert scenario_2_call[1]["parent_item_id"] is None + assert ( + scenario_2_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The second scenario]" + ) + + # Verify second scenario steps + step_2_given = mock_client.start_test_item.call_args_list[4] + assert step_2_given[0][0] == "Given I have empty step" + assert step_2_given[0][2] == "step" + assert step_2_given[1]["parent_item_id"] == scenario_2_call[1]["name"] + "_1" + assert step_2_given[1]["has_stats"] is False + + step_2_then = mock_client.start_test_item.call_args_list[5] + assert step_2_then[0][0] == "Then I have one more empty step" + assert step_2_then[0][2] == "step" + assert step_2_then[1]["parent_item_id"] == scenario_2_call[1]["name"] + "_1" + assert step_2_then[1]["has_stats"] is False + + # Verify third scenario from second rule + scenario_3_call = mock_client.start_test_item.call_args_list[6] + assert ( + scenario_3_call[1]["name"] + == "Feature: Test rule keyword - Rule: The second rule - Scenario: The third scenario" + ) + assert scenario_3_call[1]["item_type"] == "STEP" + assert scenario_3_call[1].get("has_stats", True) is True + assert scenario_3_call[1]["parent_item_id"] is None + assert ( + scenario_3_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The second rule]/[SCENARIO:The third scenario]" + ) + + # Verify third scenario steps + step_3_given = mock_client.start_test_item.call_args_list[7] + assert step_3_given[0][0] == "Given I have empty step" + assert step_3_given[0][2] == "step" + assert step_3_given[1]["parent_item_id"] == scenario_3_call[1]["name"] + "_1" + assert step_3_given[1]["has_stats"] is False + + step_3_then = mock_client.start_test_item.call_args_list[8] + assert step_3_then[0][0] == "Then I have one more else empty step" + assert step_3_then[0][2] == "step" + assert step_3_then[1]["parent_item_id"] == scenario_3_call[1]["name"] + "_1" + assert step_3_then[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_bdd_rule_hierarchy(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify Feature + feature_call = mock_client.start_test_item.call_args_list[0] + assert 
feature_call[1]["name"] == "Feature: Test rule keyword" + assert feature_call[1]["item_type"] == "SUITE" + assert feature_call[1].get("has_stats", True) is True + assert feature_call[1]["parent_item_id"] is None + feature_id = "Feature: Test rule keyword_1" + + # Verify first Rule + rule_1_call = mock_client.start_test_item.call_args_list[1] + assert rule_1_call[1]["name"] == "Rule: The first rule" + assert rule_1_call[1]["item_type"] == "SUITE" + assert rule_1_call[1].get("has_stats", True) is True + assert rule_1_call[1]["parent_item_id"] == feature_id + rule_1_id = "Rule: The first rule_1" + + # Verify first scenario under first rule + scenario_1_call = mock_client.start_test_item.call_args_list[2] + assert scenario_1_call[1]["name"] == "Scenario: The first scenario" + assert scenario_1_call[1]["item_type"] == "STEP" + assert scenario_1_call[1].get("has_stats", True) is True + assert scenario_1_call[1]["parent_item_id"] == rule_1_id + assert ( + scenario_1_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The first scenario]" + ) + + # Verify second scenario under first rule + scenario_2_call = mock_client.start_test_item.call_args_list[5] + assert scenario_2_call[1]["name"] == "Scenario: The second scenario" + assert scenario_2_call[1]["item_type"] == "STEP" + assert scenario_2_call[1].get("has_stats", True) is True + assert scenario_2_call[1]["parent_item_id"] == rule_1_id + assert ( + scenario_2_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The first rule]/[SCENARIO:The second scenario]" + ) + + # Verify second Rule + rule_2_call = mock_client.start_test_item.call_args_list[8] + assert rule_2_call[1]["name"] == "Rule: The second rule" + assert rule_2_call[1]["item_type"] == "SUITE" + assert rule_2_call[1].get("has_stats", True) is True + assert rule_2_call[1]["parent_item_id"] == feature_id + rule_2_id = "Rule: The second rule_1" + + # Verify third scenario under second rule + scenario_3_call = mock_client.start_test_item.call_args_list[9] + assert scenario_3_call[1]["name"] == "Scenario: The third scenario" + assert scenario_3_call[1]["item_type"] == "STEP" + assert scenario_3_call[1].get("has_stats", True) is True + assert scenario_3_call[1]["parent_item_id"] == rule_2_id + assert ( + scenario_3_call[1]["code_ref"] + == "features/rule_keyword.feature/[RULE:The second rule]/[SCENARIO:The third scenario]" + ) From 3cddf895c5fad7de80c2b0764ce43e273549fffe Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Mon, 24 Feb 2025 18:22:15 +0300 Subject: [PATCH 084/110] Pytest BDD implementation: Scenario Outline: WIP --- .../scenario_outline_parameters.feature | 12 ++ .../scenario_outline_parameters_steps.py | 37 +++++ pytest_reportportal/service.py | 19 +++ tests/integration/test_bdd.py | 137 ++++++++++++++++-- 4 files changed, 194 insertions(+), 11 deletions(-) create mode 100644 examples/bdd/features/scenario_outline_parameters.feature create mode 100644 examples/bdd/step_defs/scenario_outline_parameters_steps.py diff --git a/examples/bdd/features/scenario_outline_parameters.feature b/examples/bdd/features/scenario_outline_parameters.feature new file mode 100644 index 0000000..6913cbe --- /dev/null +++ b/examples/bdd/features/scenario_outline_parameters.feature @@ -0,0 +1,12 @@ +Feature: Basic test with parameters + + Scenario Outline: Test with different parameters + Given It is test with parameters + When I have parameter + Then I emit number on level info + + Examples: + | str | parameters | + | "first" | 123 | + | "second" | 
From 3cddf895c5fad7de80c2b0764ce43e273549fffe Mon Sep 17 00:00:00 2001
From: Vadzim Hushchanskou
Date: Mon, 24 Feb 2025 18:22:15 +0300
Subject: [PATCH 084/110] Pytest BDD implementation: Scenario Outline: WIP

---
 .../scenario_outline_parameters.feature       |  12 ++
 .../scenario_outline_parameters_steps.py      |  37 +++++
 pytest_reportportal/service.py                |  19 +++
 tests/integration/test_bdd.py                 | 137 ++++++++++++++++--
 4 files changed, 194 insertions(+), 11 deletions(-)
 create mode 100644 examples/bdd/features/scenario_outline_parameters.feature
 create mode 100644 examples/bdd/step_defs/scenario_outline_parameters_steps.py

diff --git a/examples/bdd/features/scenario_outline_parameters.feature b/examples/bdd/features/scenario_outline_parameters.feature
new file mode 100644
index 0000000..6913cbe
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_parameters.feature
@@ -0,0 +1,12 @@
+Feature: Basic test with parameters
+
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
+      | "third" | 12345678 |
diff --git a/examples/bdd/step_defs/scenario_outline_parameters_steps.py b/examples/bdd/step_defs/scenario_outline_parameters_steps.py
new file mode 100644
index 0000000..2bfdd88
--- /dev/null
+++ b/examples/bdd/step_defs/scenario_outline_parameters_steps.py
@@ -0,0 +1,37 @@
+# Copyright 2025 EPAM Systems
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from pytest_bdd import given, when, then, parsers, scenarios
+
+# Import the scenario from the feature file
+scenarios("../features/scenario_outline_parameters.feature")
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+@given("It is test with parameters")
+def step_with_parameters():
+    LOGGER.info("It is test with parameters")
+
+
+@when(parsers.parse('I have parameter "{parameter}"'))
+def have_parameter_str(parameter: str):
+    LOGGER.info("String parameter %s", parameter)
+
+
+@then(parsers.parse("I emit number {parameters:d} on level info"))
+def emit_number_info(parameters):
+    LOGGER.info("Test with parameters: %d", parameters)
diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py
index 1885acc..a361879 100644
--- a/pytest_reportportal/service.py
+++ b/pytest_reportportal/service.py
@@ -1124,6 +1124,22 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None:
         leaf["exec"] = ExecStatus.FINISHED
         self._finish_parents(leaf)
 
+    def _get_scenario_template(self, scenario: Scenario) -> Optional[ScenarioTemplate]:
+        line_num = scenario.line_number
+        feature = scenario.feature
+        scenario_template = None
+        for template in feature.scenarios.values():
+            if template.line_number == line_num:
+                scenario_template = template
+                break
+        if scenario_template and isinstance(scenario_template, ScenarioTemplate):
+            return scenario_template
+
+    def _get_scenario_parameter_from_template(
+        self, scenario: Scenario, scenario_template: ScenarioTemplate
+    ) -> Optional[Dict[str, Any]]:
+        pass
+
     def _get_scenario_code_ref(self, scenario: Scenario) -> str:
         code_ref = scenario.feature.rel_filename + "/"
         rule = getattr(scenario, "rule", None)
         if rule:
             code_ref += f"[RULE:{rule.name}]/"
         code_ref += f"[SCENARIO:{scenario.name}]"
@@ -1142,6 +1158,9 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None:
         :param leaf: item context
         """
         scenario = leaf["item"]
+        scenario_template = self._get_scenario_template(scenario)
+        if scenario_template:
+            leaf["parameters"] = self._get_scenario_parameter_from_template(scenario, scenario_template)
         leaf["code_ref"] = self._get_scenario_code_ref(scenario)
         leaf["test_case_id"] = self._get_scenario_test_case_id(leaf)
         leaf["attributes"] = self._process_bdd_attributes(scenario)
diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py
index 59fb01d..0685bdb 100644
--- a/tests/integration/test_bdd.py
+++ b/tests/integration/test_bdd.py
@@ -81,7 +81,7 @@ def setup_mock_for_logging(mock_client_init):
 
 
 @mock.patch(REPORT_PORTAL_SERVICE)
-def test_basic_bdd(mock_client_init):
+def test_basic(mock_client_init):
     mock_client = setup_mock(mock_client_init)
     setup_mock_for_logging(mock_client_init)
     result = 
utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"]) @@ -123,7 +123,7 @@ def test_basic_bdd(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_basic_bdd_with_feature_suite(mock_client_init): +def test_basic_with_feature_suite(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} @@ -160,7 +160,7 @@ def test_basic_bdd_with_feature_suite(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_scenario_descriptions(mock_client_init): +def test_scenario_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments_description.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" @@ -177,7 +177,7 @@ def test_bdd_scenario_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_feature_descriptions(mock_client_init): +def test_feature_descriptions(mock_client_init): mock_client = setup_mock(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -191,7 +191,7 @@ def test_bdd_feature_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_failed_feature(mock_client_init): +def test_failed_feature(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_failed_step.py"]) @@ -226,7 +226,7 @@ def test_bdd_failed_feature(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_scenario_attributes(mock_client_init): +def test_scenario_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -243,7 +243,7 @@ def test_bdd_scenario_attributes(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_feature_attributes(mock_client_init): +def test_feature_attributes(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -270,7 +270,7 @@ def test_bdd_feature_attributes(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_background_step(mock_client_init): +def test_background_step(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -334,7 +334,7 @@ def test_bdd_background_step(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_background_two_steps(mock_client_init): +def test_background_two_steps(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -381,7 +381,7 @@ def test_bdd_background_two_steps(mock_client_init): @pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_rule(mock_client_init): +def test_rule(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"]) @@ -476,7 +476,7 @@ def test_bdd_rule(mock_client_init): @pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) -def test_bdd_rule_hierarchy(mock_client_init): +def test_rule_hierarchy(mock_client_init): mock_client = setup_mock(mock_client_init) setup_mock_for_logging(mock_client_init) @@ -541,3 +541,118 @@ def test_bdd_rule_hierarchy(mock_client_init): 
scenario_3_call[1]["code_ref"] == "features/rule_keyword.feature/[RULE:The second rule]/[SCENARIO:The third scenario]" ) + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_parameters(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_parameters_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with parameters + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + assert ( + scenario_call_1[1]["code_ref"] + == 'features/scenario_outline_parameters.feature/[SCENARIO:Test with different parameters["first",123]]' + ) + assert ("str", "first") in scenario_call_1[1]["parameters"].items() + assert ("parameters", 123) in scenario_call_1[1]["parameters"].items() + assert scenario_call_1[1]["description"] is not None + assert scenario_call_1[1]["description"].endswith('| "first" | 123 |') + + # Verify steps for first scenario + given_step_1 = mock_client.start_test_item.call_args_list[1] + assert given_step_1[0][0] == "Given It is test with parameters" + assert given_step_1[0][2] == "step" + assert given_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert given_step_1[1]["has_stats"] is False + + when_step_1 = mock_client.start_test_item.call_args_list[2] + assert when_step_1[0][0] == 'When I have parameter "first"' + assert when_step_1[0][2] == "step" + assert when_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert when_step_1[1]["has_stats"] is False + + then_step_1 = mock_client.start_test_item.call_args_list[3] + assert then_step_1[0][0] == "Then I emit number 123 on level info" + assert then_step_1[0][2] == "step" + assert then_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert then_step_1[1]["has_stats"] is False + + # Verify second scenario with parameters + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + assert ( + scenario_call_2[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call_2[1]["item_type"] == "STEP" + assert scenario_call_2[1].get("has_stats", True) + assert ( + scenario_call_2[1]["code_ref"] + == 'features/scenario_outline_parameters.feature/[SCENARIO:Test with different parameters["second",12345]]' + ) + assert ("str", "second") in scenario_call_2[1]["parameters"].items() + assert ("parameters", 12345) in scenario_call_2[1]["parameters"].items() + assert scenario_call_2[1]["description"] is not None + assert scenario_call_2[1]["description"].endswith('| "second" | 12345 |') + + # Verify steps for second scenario + given_step_2 = mock_client.start_test_item.call_args_list[5] + assert given_step_2[0][0] == "Given It is test with parameters" + assert given_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert given_step_2[1]["has_stats"] is False + + when_step_2 = mock_client.start_test_item.call_args_list[6] + assert when_step_2[0][0] == 'When I have parameter "second"' + assert when_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert when_step_2[1]["has_stats"] is False + + then_step_2 = 
mock_client.start_test_item.call_args_list[7] + assert then_step_2[0][0] == "Then I emit number 12345 on level info" + assert then_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert then_step_2[1]["has_stats"] is False + + # Verify third scenario with parameters + scenario_call_3 = mock_client.start_test_item.call_args_list[8] + assert ( + scenario_call_3[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call_3[1]["item_type"] == "STEP" + assert scenario_call_3[1].get("has_stats", True) + assert ( + scenario_call_3[1]["code_ref"] + == 'features/scenario_outline_parameters.feature/[SCENARIO:Test with different parameters["third",12345678]]' + ) + assert ("str", "third") in scenario_call_3[1]["parameters"].items() + assert ("parameters", 12345678) in scenario_call_3[1]["parameters"].items() + assert scenario_call_3[1]["description"] is not None + assert scenario_call_3[1]["description"].endswith('| "third" | 12345678 |') + + # Verify steps for third scenario + given_step_3 = mock_client.start_test_item.call_args_list[9] + assert given_step_3[0][0] == "Given It is test with parameters" + assert given_step_3[1]["parent_item_id"] == scenario_call_3[1]["name"] + "_3" + assert given_step_3[1]["has_stats"] is False + + when_step_3 = mock_client.start_test_item.call_args_list[10] + assert when_step_3[0][0] == 'When I have parameter "third"' + assert when_step_3[1]["parent_item_id"] == scenario_call_3[1]["name"] + "_3" + assert when_step_3[1]["has_stats"] is False + + then_step_3 = mock_client.start_test_item.call_args_list[11] + assert then_step_3[0][0] == "Then I emit number 12345678 on level info" + assert then_step_3[1]["parent_item_id"] == scenario_call_3[1]["name"] + "_3" + assert then_step_3[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" From 54075638788aa4e9dc13970330696c67c3a7e2ee Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 25 Feb 2025 18:31:04 +0300 Subject: [PATCH 085/110] Pytest BDD implementation: Scenario Outline: done --- pytest_reportportal/service.py | 103 +++++++++++++++++++++++++++++---- tests/integration/test_bdd.py | 33 +++++++---- 2 files changed, 114 insertions(+), 22 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index a361879..5f33b65 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -469,7 +469,7 @@ def _get_item_description(self, test_item: Any) -> Optional[str]: return trim_docstring(doc) if isinstance(test_item, DoctestItem): return test_item.reportinfo()[2] - if isinstance(test_item, (Feature, Scenario, ScenarioTemplate, Rule)): + if isinstance(test_item, Feature): description = test_item.description if description: return description @@ -764,6 +764,7 @@ def _process_metadata_item_start(self, leaf: Dict[str, Any]) -> None: """ item = leaf["item"] leaf["name"] = self._process_item_name(leaf) + leaf["description"] = self._get_item_description(item) leaf["parameters"] = self._get_parameters(item) leaf["code_ref"] = self._get_code_ref(item) leaf["test_case_id"] = self._process_test_case_id(leaf) @@ -784,7 +785,7 @@ def _build_start_step_rq(self, leaf: Dict[str, Any]) -> Dict[str, Any]: payload = { "attributes": leaf.get("attributes", None), "name": self._truncate_item_name(leaf["name"]), - "description": self._get_item_description(leaf["item"]), + 
"description": leaf["description"], "start_time": timestamp(), "item_type": "STEP", "code_ref": leaf.get("code_ref", None), @@ -810,7 +811,7 @@ def start_pytest_item(self, test_item: Optional[Item] = None): Start pytest_item. :param test_item: pytest.Item - :return: item ID + :return: None """ if test_item is None: return @@ -1135,22 +1136,93 @@ def _get_scenario_template(self, scenario: Scenario) -> Optional[ScenarioTemplat if scenario_template and isinstance(scenario_template, ScenarioTemplate): return scenario_template - def _get_scenario_parameter_from_template( - self, scenario: Scenario, scenario_template: ScenarioTemplate + def _get_scenario_parameters_from_template( + self, scenario: Scenario, scenario_template: Optional[ScenarioTemplate] ) -> Optional[Dict[str, Any]]: - pass + """Get scenario parameters from its template by comparing steps. + + :param scenario: The scenario instance + :param scenario_template: The template scenario instance which holds examples + + :return: A dictionary with parameter names and values, or None if no parameters found + """ + if not scenario_template: + return None + + # Handle both single Examples and list of Examples + examples_list = [] + if isinstance(scenario_template.examples, list): + examples_list.extend(scenario_template.examples) + else: + examples_list.append(scenario_template.examples) + + # Get rendered scenario step names + scenario_steps = [step.name for step in scenario.steps] + + # Try each example row until we find matching parameters + for examples in examples_list: + if not examples or not examples.examples: + continue + + param_names = examples.example_params - def _get_scenario_code_ref(self, scenario: Scenario) -> str: + # Check each row of examples + for values in examples.examples: + # Create parameters dictionary for the current row + params = dict(zip(param_names, values)) + + # Compare template steps with scenario steps + template_steps = [] + for template_step in scenario_template.steps: + step_name = template_step.name + # Replace parameters in step name with values + for param_name, param_value in params.items(): + pattern = f"<{param_name}>" + step_name = step_name.replace(pattern, str(param_value)) + template_steps.append(step_name) + + # If all steps match, we found our parameters + if template_steps == scenario_steps: + return params + + return None + + def _get_scenario_code_ref(self, scenario: Scenario, scenario_template: Optional[ScenarioTemplate]) -> str: code_ref = scenario.feature.rel_filename + "/" rule = getattr(scenario, "rule", None) if rule: code_ref += f"[RULE:{rule.name}]/" - code_ref += f"[SCENARIO:{scenario.name}]" + if scenario_template and scenario_template.templated and scenario_template.examples: + parameters = self._get_scenario_parameters_from_template(scenario, scenario_template) + if parameters: + parameters_str = ";".join([f"{k}:{v}" for k, v in sorted(parameters.items())]) + parameters_str = f"[{parameters_str}]" if parameters_str else "" + else: + parameters_str = "" + code_ref += f"[EXAMPLE:{scenario.name}{parameters_str}]" + else: + keyword = getattr(scenario, "keyword", "Scenario").upper() + code_ref += f"[{keyword}:{scenario.name}]" + return code_ref def _get_scenario_test_case_id(self, leaf: Dict[str, Any]) -> str: return leaf["code_ref"] + def _dict_to_markdown_table(self, data: Dict[str, Any]) -> str: + if not data: + return "" + + headers = list(data.keys()) + values = list(data.values()) + + header_row = "| " + " | ".join(headers) + " |" + separator_row = "| " + " | ".join(["---"] 
* len(headers)) + " |" + value_row = "| " + " | ".join(map(str, values)) + " |" + table = "\n".join([header_row, separator_row, value_row]) + + return table + def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: """ Process all types of scenario metadata for its start event. @@ -1158,10 +1230,21 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: :param leaf: item context """ scenario = leaf["item"] + description = ( + "\n".join(scenario.description) if isinstance(scenario.description, list) else scenario.description + ) + leaf["description"] = description if description else None scenario_template = self._get_scenario_template(scenario) if scenario_template: - leaf["parameters"] = self._get_scenario_parameter_from_template(scenario, scenario_template) - leaf["code_ref"] = self._get_scenario_code_ref(scenario) + parameters = self._get_scenario_parameters_from_template(scenario, scenario_template) + leaf["parameters"] = parameters + if parameters: + parameters_str = f"Parameters:\n{self._dict_to_markdown_table(parameters)}" + if leaf["description"]: + leaf["description"] = leaf["description"] + f"\n\n---\n\n{parameters_str}" + else: + leaf["description"] = parameters_str + leaf["code_ref"] = self._get_scenario_code_ref(scenario, scenario_template) leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) leaf["attributes"] = self._process_bdd_attributes(scenario) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 0685bdb..b9580cf 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -560,12 +560,15 @@ def test_scenario_outline_parameters(mock_client_init): assert scenario_call_1[1].get("has_stats", True) assert ( scenario_call_1[1]["code_ref"] - == 'features/scenario_outline_parameters.feature/[SCENARIO:Test with different parameters["first",123]]' + == "features/scenario_outline_parameters.feature/[EXAMPLE:Test with different parameters" + '[parameters:123;str:"first"]]' ) - assert ("str", "first") in scenario_call_1[1]["parameters"].items() - assert ("parameters", 123) in scenario_call_1[1]["parameters"].items() + parameters = scenario_call_1[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"first"') in parameters + assert ("parameters", "123") in parameters assert scenario_call_1[1]["description"] is not None - assert scenario_call_1[1]["description"].endswith('| "first" | 123 |') + assert scenario_call_1[1]["description"].endswith('| "first" | 123 |') # Verify steps for first scenario given_step_1 = mock_client.start_test_item.call_args_list[1] @@ -596,12 +599,15 @@ def test_scenario_outline_parameters(mock_client_init): assert scenario_call_2[1].get("has_stats", True) assert ( scenario_call_2[1]["code_ref"] - == 'features/scenario_outline_parameters.feature/[SCENARIO:Test with different parameters["second",12345]]' + == "features/scenario_outline_parameters.feature/[EXAMPLE:Test with different parameters" + '[parameters:12345;str:"second"]]' ) - assert ("str", "second") in scenario_call_2[1]["parameters"].items() - assert ("parameters", 12345) in scenario_call_2[1]["parameters"].items() + parameters = scenario_call_2[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"second"') in parameters + assert ("parameters", "12345") in parameters assert scenario_call_2[1]["description"] is not None - assert scenario_call_2[1]["description"].endswith('| "second" | 12345 |') + assert scenario_call_2[1]["description"].endswith('| "second" | 12345 |') # Verify 
steps for second scenario given_step_2 = mock_client.start_test_item.call_args_list[5] @@ -629,12 +635,15 @@ def test_scenario_outline_parameters(mock_client_init): assert scenario_call_3[1].get("has_stats", True) assert ( scenario_call_3[1]["code_ref"] - == 'features/scenario_outline_parameters.feature/[SCENARIO:Test with different parameters["third",12345678]]' + == "features/scenario_outline_parameters.feature/[EXAMPLE:Test with different parameters" + '[parameters:12345678;str:"third"]]' ) - assert ("str", "third") in scenario_call_3[1]["parameters"].items() - assert ("parameters", 12345678) in scenario_call_3[1]["parameters"].items() + parameters = scenario_call_3[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"third"') in parameters + assert ("parameters", "12345678") in parameters assert scenario_call_3[1]["description"] is not None - assert scenario_call_3[1]["description"].endswith('| "third" | 12345678 |') + assert scenario_call_3[1]["description"].endswith('| "third" | 12345678 |') # Verify steps for third scenario given_step_3 = mock_client.start_test_item.call_args_list[9] From 8450e9fb0a56d717bca09fd23e14566fea1e19fc Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 25 Feb 2025 18:40:27 +0300 Subject: [PATCH 086/110] Fix tests for Python 3.8 --- pytest_reportportal/service.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 5f33b65..54eed41 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -354,6 +354,17 @@ def _remove_file_names(self, test_tree: Dict[str, Any]) -> None: child_leaf["parent"] = parent_leaf self._remove_file_names(child_leaf) + def _get_scenario_template(self, scenario: Scenario) -> Optional[ScenarioTemplate]: + line_num = scenario.line_number + feature = scenario.feature + scenario_template = None + for template in feature.scenarios.values(): + if template.line_number == line_num: + scenario_template = template + break + if scenario_template and isinstance(scenario_template, ScenarioTemplate): + return scenario_template + def _generate_names(self, test_tree: Dict[str, Any]) -> None: if test_tree["type"] == LeafType.ROOT: test_tree["name"] = "root" @@ -370,7 +381,11 @@ def _generate_names(self, test_tree: Dict[str, Any]) -> None: keyword = getattr(item, "keyword", "Feature") test_tree["name"] = f"{keyword}: {name}" elif isinstance(item, Scenario): - keyword = getattr(item, "keyword", "Scenario") + scenario_template = self._get_scenario_template(item) + if scenario_template and scenario_template.templated: + keyword = getattr(item, "keyword", "Scenario Outline") + else: + keyword = getattr(item, "keyword", "Scenario") test_tree["name"] = f"{keyword}: {item.name}" elif isinstance(item, Rule): keyword = getattr(item, "keyword", "Rule") @@ -1125,17 +1140,6 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) - def _get_scenario_template(self, scenario: Scenario) -> Optional[ScenarioTemplate]: - line_num = scenario.line_number - feature = scenario.feature - scenario_template = None - for template in feature.scenarios.values(): - if template.line_number == line_num: - scenario_template = template - break - if scenario_template and isinstance(scenario_template, ScenarioTemplate): - return scenario_template - def _get_scenario_parameters_from_template( self, scenario: Scenario, 
scenario_template: Optional[ScenarioTemplate] ) -> Optional[Dict[str, Any]]: From 37db7a873496b578067400f4a19d0c7e46bedc5d Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 25 Feb 2025 18:44:32 +0300 Subject: [PATCH 087/110] Fix tests for Python 3.8 --- tests/integration/test_bdd.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index b9580cf..95a2d82 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -170,9 +170,6 @@ def test_scenario_descriptions(mock_client_init): assert scenario_call[1]["code_ref"] == code_ref assert scenario_call[1]["test_case_id"] == code_ref description = scenario_call[1]["description"] - if pytest_bdd_version[0] < 8: - # before pytest-bdd 8 description was a list - description = description[0] assert description == "Description for the scenario" From d8f2f37377bf434c2636ac76a4fc46dfaf036310 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 25 Feb 2025 18:47:22 +0300 Subject: [PATCH 088/110] Fix tests for Python 3.8 --- pytest_reportportal/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 54eed41..f4b670c 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1236,7 +1236,7 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: scenario = leaf["item"] description = ( "\n".join(scenario.description) if isinstance(scenario.description, list) else scenario.description - ) + ).rstrip("\n") leaf["description"] = description if description else None scenario_template = self._get_scenario_template(scenario) if scenario_template: From ab276fe9aa768e30b07333f80321fb3a9ca4f2e0 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Tue, 25 Feb 2025 18:49:31 +0300 Subject: [PATCH 089/110] Fix isort --- examples/bdd/step_defs/scenario_outline_parameters_steps.py | 3 ++- examples/bdd/step_defs/test_rule_steps.py | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/bdd/step_defs/scenario_outline_parameters_steps.py b/examples/bdd/step_defs/scenario_outline_parameters_steps.py index 2bfdd88..ece05e1 100644 --- a/examples/bdd/step_defs/scenario_outline_parameters_steps.py +++ b/examples/bdd/step_defs/scenario_outline_parameters_steps.py @@ -13,7 +13,8 @@ # limitations under the License. 
import logging -from pytest_bdd import given, when, then, parsers, scenarios + +from pytest_bdd import given, parsers, scenarios, then, when # Import the scenario from the feature file scenarios("../features/scenario_outline_parameters.feature") diff --git a/examples/bdd/step_defs/test_rule_steps.py b/examples/bdd/step_defs/test_rule_steps.py index e01d308..e981126 100644 --- a/examples/bdd/step_defs/test_rule_steps.py +++ b/examples/bdd/step_defs/test_rule_steps.py @@ -15,7 +15,6 @@ """Rule keyword test module.""" from pytest_bdd import given, scenarios, then - scenarios("../features/rule_keyword.feature") From 36ca80c37e1b092279b20a8e898f205406ebdd8b Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 26 Feb 2025 14:21:19 +0300 Subject: [PATCH 090/110] Update reportportal-client to 5.6.1 and refactor scenario description formatting --- pytest_reportportal/service.py | 22 ++++------------------ requirements.txt | 2 +- tests/integration/test_bdd.py | 6 +++--- 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index f4b670c..f9ec203 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -30,7 +30,7 @@ from pytest import Class, Function, Item, Module, Package, PytestWarning, Session from reportportal_client.aio import Task from reportportal_client.core.rp_issues import ExternalIssue, Issue -from reportportal_client.helpers import timestamp +from reportportal_client.helpers import timestamp, markdown_helpers from .config import AgentConfig @@ -1142,7 +1142,7 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: def _get_scenario_parameters_from_template( self, scenario: Scenario, scenario_template: Optional[ScenarioTemplate] - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Dict[str, str]]: """Get scenario parameters from its template by comparing steps. :param scenario: The scenario instance @@ -1213,20 +1213,6 @@ def _get_scenario_code_ref(self, scenario: Scenario, scenario_template: Optional def _get_scenario_test_case_id(self, leaf: Dict[str, Any]) -> str: return leaf["code_ref"] - def _dict_to_markdown_table(self, data: Dict[str, Any]) -> str: - if not data: - return "" - - headers = list(data.keys()) - values = list(data.values()) - - header_row = "| " + " | ".join(headers) + " |" - separator_row = "| " + " | ".join(["---"] * len(headers)) + " |" - value_row = "| " + " | ".join(map(str, values)) + " |" - table = "\n".join([header_row, separator_row, value_row]) - - return table - def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: """ Process all types of scenario metadata for its start event. 
@@ -1243,9 +1229,9 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: parameters = self._get_scenario_parameters_from_template(scenario, scenario_template) leaf["parameters"] = parameters if parameters: - parameters_str = f"Parameters:\n{self._dict_to_markdown_table(parameters)}" + parameters_str = f"Parameters:\n{markdown_helpers.format_data_table_dict(parameters)}" if leaf["description"]: - leaf["description"] = leaf["description"] + f"\n\n---\n\n{parameters_str}" + leaf["description"] = markdown_helpers.as_two_parts(leaf["description"], parameters_str) else: leaf["description"] = parameters_str leaf["code_ref"] = self._get_scenario_code_ref(scenario, scenario_template) diff --git a/requirements.txt b/requirements.txt index c488176..e53c020 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ dill>=0.3.6 pytest>=4.6.10 -reportportal-client~=5.6.0 +reportportal-client~=5.6.1 aenum>=3.1.0 diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 95a2d82..72b8e0f 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -565,7 +565,7 @@ def test_scenario_outline_parameters(mock_client_init): assert ("str", '"first"') in parameters assert ("parameters", "123") in parameters assert scenario_call_1[1]["description"] is not None - assert scenario_call_1[1]["description"].endswith('| "first" | 123 |') + assert scenario_call_1[1]["description"].endswith('|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|') # Verify steps for first scenario given_step_1 = mock_client.start_test_item.call_args_list[1] @@ -604,7 +604,7 @@ def test_scenario_outline_parameters(mock_client_init): assert ("str", '"second"') in parameters assert ("parameters", "12345") in parameters assert scenario_call_2[1]["description"] is not None - assert scenario_call_2[1]["description"].endswith('| "second" | 12345 |') + assert scenario_call_2[1]["description"].endswith('|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|') # Verify steps for second scenario given_step_2 = mock_client.start_test_item.call_args_list[5] @@ -640,7 +640,7 @@ def test_scenario_outline_parameters(mock_client_init): assert ("str", '"third"') in parameters assert ("parameters", "12345678") in parameters assert scenario_call_3[1]["description"] is not None - assert scenario_call_3[1]["description"].endswith('| "third" | 12345678 |') + assert scenario_call_3[1]["description"].endswith('|\xa0"third"\xa0|\xa0\xa012345678\xa0\xa0|') # Verify steps for third scenario given_step_3 = mock_client.start_test_item.call_args_list[9] From b6514246123ed7fcb9e61b3b62fdfa7343ae0466 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 26 Feb 2025 14:23:35 +0300 Subject: [PATCH 091/110] Fix isort --- pytest_reportportal/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index f9ec203..d9af454 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -30,7 +30,7 @@ from pytest import Class, Function, Item, Module, Package, PytestWarning, Session from reportportal_client.aio import Task from reportportal_client.core.rp_issues import ExternalIssue, Issue -from reportportal_client.helpers import timestamp, markdown_helpers +from reportportal_client.helpers import markdown_helpers, timestamp from .config import AgentConfig From 6b61ea426cd3528e1cd62f98bd1a6892ffa91cfd Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 26 Feb 2025 18:02:34 +0300 Subject: 
[PATCH 092/110] Add more tests

---
 examples/bdd/features/examples_tags.feature  |  13 ++
 .../bdd/features/rule_description.feature    |   7 +
 .../scenario_outline_background.feature      |  14 ++
 .../scenario_outline_description.feature     |  13 ++
 examples/bdd/step_defs/example_tags_steps.py |  38 ++++
 .../scenario_outline_background_steps.py     |  44 ++++
 .../scenario_outline_description_steps.py    |  38 ++++
 .../step_defs/test_rule_description_steps.py |  25 +++
 pytest_reportportal/service.py               |  18 +-
 tests/integration/test_bdd.py                | 194 +++++++++++++++++-
 10 files changed, 398 insertions(+), 6 deletions(-)
 create mode 100644 examples/bdd/features/examples_tags.feature
 create mode 100644 examples/bdd/features/rule_description.feature
 create mode 100644 examples/bdd/features/scenario_outline_background.feature
 create mode 100644 examples/bdd/features/scenario_outline_description.feature
 create mode 100644 examples/bdd/step_defs/example_tags_steps.py
 create mode 100644 examples/bdd/step_defs/scenario_outline_background_steps.py
 create mode 100644 examples/bdd/step_defs/scenario_outline_description_steps.py
 create mode 100644 examples/bdd/step_defs/test_rule_description_steps.py

diff --git a/examples/bdd/features/examples_tags.feature b/examples/bdd/features/examples_tags.feature
new file mode 100644
index 0000000..77b3707
--- /dev/null
+++ b/examples/bdd/features/examples_tags.feature
@@ -0,0 +1,13 @@
+Feature: Basic test with parameters
+
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    @test
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
+      | "third" | 12345678 |
diff --git a/examples/bdd/features/rule_description.feature b/examples/bdd/features/rule_description.feature
new file mode 100644
index 0000000..5d5ecdb
--- /dev/null
+++ b/examples/bdd/features/rule_description.feature
@@ -0,0 +1,7 @@
+Feature: Test rule keyword
+
+  Rule: The first rule
+    Description for the Rule
+
+    Scenario: The first scenario
+      Given I have empty step
diff --git a/examples/bdd/features/scenario_outline_background.feature b/examples/bdd/features/scenario_outline_background.feature
new file mode 100644
index 0000000..a1dddd3
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_background.feature
@@ -0,0 +1,14 @@
+Feature: Basic test with parameters and background
+
+  Background:
+    Given I have empty step in background
+
+  Scenario Outline: Test with different parameters
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
diff --git a/examples/bdd/features/scenario_outline_description.feature b/examples/bdd/features/scenario_outline_description.feature
new file mode 100644
index 0000000..371ef7c
--- /dev/null
+++ b/examples/bdd/features/scenario_outline_description.feature
@@ -0,0 +1,13 @@
+Feature: Basic test with parameters and description
+
+  Scenario Outline: Test with different parameters
+    The description for the scenario outline
+
+    Given It is test with parameters
+    When I have parameter <str>
+    Then I emit number <parameters> on level info
+
+    Examples:
+      | str | parameters |
+      | "first" | 123 |
+      | "second" | 12345 |
diff --git a/examples/bdd/step_defs/example_tags_steps.py b/examples/bdd/step_defs/example_tags_steps.py
new file mode 100644
index 0000000..9eef907
--- /dev/null
+++ b/examples/bdd/step_defs/example_tags_steps.py
@@ -0,0 +1,38 @@
+# Copyright 2025 EPAM Systems
+#
+# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/examples_tags.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_background_steps.py b/examples/bdd/step_defs/scenario_outline_background_steps.py new file mode 100644 index 0000000..ee75256 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_background_steps.py @@ -0,0 +1,44 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_background.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("I have empty step in background") +def empty_step(): + """Empty step implementation.""" + pass + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/scenario_outline_description_steps.py b/examples/bdd/step_defs/scenario_outline_description_steps.py new file mode 100644 index 0000000..4ff1765 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_description_steps.py @@ -0,0 +1,38 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_description.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/examples/bdd/step_defs/test_rule_description_steps.py b/examples/bdd/step_defs/test_rule_description_steps.py new file mode 100644 index 0000000..cb1718f --- /dev/null +++ b/examples/bdd/step_defs/test_rule_description_steps.py @@ -0,0 +1,25 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Rule keyword test module.""" + +from pytest_bdd import given, scenarios, then + +scenarios("../features/rule_description.feature") + + +@given("I have empty step") +def empty_step(): + """Empty step implementation.""" + pass diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index d9af454..16f0ef8 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -484,10 +484,10 @@ def _get_item_description(self, test_item: Any) -> Optional[str]: return trim_docstring(doc) if isinstance(test_item, DoctestItem): return test_item.reportinfo()[2] - if isinstance(test_item, Feature): + if isinstance(test_item, (Feature, Rule)): description = test_item.description if description: - return description + return description.lstrip() # There is a bug in pytest-bdd that adds an extra space def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> Any: """ @@ -503,8 +503,20 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> return func(leaf) def _process_bdd_attributes(self, scenario: Union[Feature, Scenario, Rule]) -> List[Dict[str, str]]: + tags = [] + tags.extend(scenario.tags) + if isinstance(scenario, Scenario): + template = self._get_scenario_template(scenario) + if template and template.templated: + examples = [] + if isinstance(template.examples, list): + examples.extend(template.examples) + else: + examples.append(template.examples) + for example in examples: + tags.extend(example.tags) attributes = [] - for tag in scenario.tags: + for tag in tags: key = None value = tag if ATTRIBUTE_DELIMITER in tag: diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 72b8e0f..b6c582e 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -232,7 +232,7 @@ def test_scenario_attributes(mock_client_init): assert int(result) == 0, "Exit code should be 0 (no errors)" scenario_call = mock_client.start_test_item.call_args_list[0] - scenario_attrs = scenario_call[1].get("attributes", []) + scenario_attrs = scenario_call[1].get("attributes", None) assert scenario_attrs is not None assert len(scenario_attrs) == 2 assert {"value": "ok"} in scenario_attrs @@ -542,8 +542,7 @@ def test_rule_hierarchy(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_scenario_outline_parameters(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_parameters_steps.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" @@ -662,3 +661,192 @@ def test_scenario_outline_parameters(mock_client_init): finish_calls = mock_client.finish_test_item.call_args_list for call in finish_calls: assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_examples_tags(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/example_tags_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + scenario_attrs = scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 1 + assert {"value": "test"} in scenario_attrs + + # Verify second scenario + scenario_call = mock_client.start_test_item.call_args_list[4] + scenario_attrs 
= scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 1 + assert {"value": "test"} in scenario_attrs + + # Verify third scenario + scenario_call = mock_client.start_test_item.call_args_list[8] + scenario_attrs = scenario_call[1].get("attributes", None) + assert scenario_attrs is not None + assert len(scenario_attrs) == 1 + assert {"value": "test"} in scenario_attrs + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_background_steps(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_background_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with background + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == "Feature: Basic test with parameters and background - Scenario Outline: Test with different parameters" + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + assert ( + scenario_call_1[1]["code_ref"] + == "features/scenario_outline_background.feature/[EXAMPLE:Test with different parameters" + '[parameters:123;str:"first"]]' + ) + parameters = scenario_call_1[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"first"') in parameters + assert ("parameters", "123") in parameters + + # Verify the Background step + background_call = mock_client.start_test_item.call_args_list[1] + assert background_call[0][0] == "Background" + assert background_call[0][2] == "step" + assert background_call[1]["has_stats"] is False + assert background_call[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + + # Verify background step for first scenario + background_step_1 = mock_client.start_test_item.call_args_list[2] + assert background_step_1[0][0] == "Given I have empty step in background" + assert background_step_1[1]["parent_item_id"] == background_call[0][0] + "_4" + assert background_step_1[1]["has_stats"] is False + + # Verify regular steps for first scenario + given_step_1 = mock_client.start_test_item.call_args_list[3] + assert given_step_1[0][0] == "Given It is test with parameters" + assert given_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert given_step_1[1]["has_stats"] is False + + when_step_1 = mock_client.start_test_item.call_args_list[4] + assert when_step_1[0][0] == 'When I have parameter "first"' + assert when_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert when_step_1[1]["has_stats"] is False + + then_step_1 = mock_client.start_test_item.call_args_list[5] + assert then_step_1[0][0] == "Then I emit number 123 on level info" + assert then_step_1[1]["parent_item_id"] == scenario_call_1[1]["name"] + "_1" + assert then_step_1[1]["has_stats"] is False + + # Verify second scenario with background + scenario_call_2 = mock_client.start_test_item.call_args_list[6] + assert ( + scenario_call_2[1]["name"] + == "Feature: Basic test with parameters and background - Scenario Outline: Test with different parameters" + ) + assert scenario_call_2[1]["item_type"] == "STEP" + assert scenario_call_2[1].get("has_stats", True) + assert ( + scenario_call_2[1]["code_ref"] + == "features/scenario_outline_background.feature/[EXAMPLE:Test 
with different parameters" + '[parameters:12345;str:"second"]]' + ) + parameters = scenario_call_2[1]["parameters"].items() + assert len(parameters) == 2 + assert ("str", '"second"') in parameters + assert ("parameters", "12345") in parameters + + # Verify the Background step + background_call = mock_client.start_test_item.call_args_list[7] + assert background_call[0][0] == "Background" + assert background_call[0][2] == "step" + assert background_call[1]["has_stats"] is False + assert background_call[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + + # Verify background step for second scenario + background_step_2 = mock_client.start_test_item.call_args_list[8] + assert background_step_2[0][0] == "Given I have empty step in background" + assert background_step_2[1]["parent_item_id"] == background_call[0][0] + "_5" + assert background_step_2[1]["has_stats"] is False + + # Verify steps for second scenario + given_step_2 = mock_client.start_test_item.call_args_list[9] + assert given_step_2[0][0] == "Given It is test with parameters" + assert given_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert given_step_2[1]["has_stats"] is False + + when_step_2 = mock_client.start_test_item.call_args_list[10] + assert when_step_2[0][0] == 'When I have parameter "second"' + assert when_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert when_step_2[1]["has_stats"] is False + + then_step_2 = mock_client.start_test_item.call_args_list[11] + assert then_step_2[0][0] == "Then I emit number 12345 on level info" + assert then_step_2[1]["parent_item_id"] == scenario_call_2[1]["name"] + "_2" + assert then_step_2[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_description(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_description_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with description + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + description_1 = scenario_call_1[1]["description"] + assert description_1.startswith("The description for the scenario outline") + assert "Parameters:" in description_1 + assert "|\xa0\xa0\xa0str\xa0\xa0\xa0|\xa0parameters\xa0|" in description_1 + assert '|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|' in description_1 + + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + description_2 = scenario_call_2[1]["description"] + assert description_2.startswith("The description for the scenario outline") + assert "Parameters:" in description_2 + assert "|\xa0\xa0\xa0str\xa0\xa0\xa0\xa0|\xa0parameters\xa0|" in description_2 + assert '|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|' in description_2 + + # Verify the steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rule_description(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + variables = {"rp_hierarchy_code": True} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + 
tests=["examples/bdd/step_defs/test_rule_description_steps.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify Feature call + feature_call = mock_client.start_test_item.call_args_list[0] + assert feature_call[1]["name"] == "Feature: Test rule keyword" + assert feature_call[1]["code_ref"].endswith("rule_description.feature") + + # Verify Rule call + rule_call = mock_client.start_test_item.call_args_list[1] + assert rule_call[1]["name"] == "Rule: The first rule" + assert rule_call[1]["description"] == "Description for the Rule" + assert rule_call[1]["item_type"] == "SUITE" From b427f457f0d71bd7ce276584301470b414dac179 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Wed, 26 Feb 2025 18:10:44 +0300 Subject: [PATCH 093/110] Backward compatibility fixes --- examples/bdd/step_defs/test_rule_description_steps.py | 2 +- pytest_reportportal/service.py | 2 +- tests/integration/test_bdd.py | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/bdd/step_defs/test_rule_description_steps.py b/examples/bdd/step_defs/test_rule_description_steps.py index cb1718f..60beafa 100644 --- a/examples/bdd/step_defs/test_rule_description_steps.py +++ b/examples/bdd/step_defs/test_rule_description_steps.py @@ -14,7 +14,7 @@ """Rule keyword test module.""" -from pytest_bdd import given, scenarios, then +from pytest_bdd import given, scenarios scenarios("../features/rule_description.feature") diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 16f0ef8..fbfa34d 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -514,7 +514,7 @@ def _process_bdd_attributes(self, scenario: Union[Feature, Scenario, Rule]) -> L else: examples.append(template.examples) for example in examples: - tags.extend(example.tags) + tags.extend(getattr(example, "tags", [])) attributes = [] for tag in tags: key = None diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index b6c582e..ac26ad2 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -663,6 +663,7 @@ def test_scenario_outline_parameters(mock_client_init): assert call[1]["status"] == "PASSED" +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_examples_tags(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) From 86562a149536cf03a714f90015623fef5fd9e2e4 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 15:38:25 +0300 Subject: [PATCH 094/110] Fix parameter description --- pytest_reportportal/service.py | 2 +- tests/integration/test_bdd.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index fbfa34d..63a7dda 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1241,7 +1241,7 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: parameters = self._get_scenario_parameters_from_template(scenario, scenario_template) leaf["parameters"] = parameters if parameters: - parameters_str = f"Parameters:\n{markdown_helpers.format_data_table_dict(parameters)}" + parameters_str = f"Parameters:\n\n{markdown_helpers.format_data_table_dict(parameters)}" if leaf["description"]: leaf["description"] = markdown_helpers.as_two_parts(leaf["description"], parameters_str) else: diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index ac26ad2..a74a3f1 
100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -813,14 +813,14 @@ def test_scenario_outline_description(mock_client_init): scenario_call_1 = mock_client.start_test_item.call_args_list[0] description_1 = scenario_call_1[1]["description"] assert description_1.startswith("The description for the scenario outline") - assert "Parameters:" in description_1 + assert "Parameters:\n\n" in description_1 assert "|\xa0\xa0\xa0str\xa0\xa0\xa0|\xa0parameters\xa0|" in description_1 assert '|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|' in description_1 scenario_call_2 = mock_client.start_test_item.call_args_list[4] description_2 = scenario_call_2[1]["description"] assert description_2.startswith("The description for the scenario outline") - assert "Parameters:" in description_2 + assert "Parameters:\n\n" in description_2 assert "|\xa0\xa0\xa0str\xa0\xa0\xa0\xa0|\xa0parameters\xa0|" in description_2 assert '|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|' in description_2 From 729aa0f2ca0d29b05ecc890c204e41f23678bf04 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 16:30:22 +0300 Subject: [PATCH 095/110] Simplify parameter get --- pytest_reportportal/service.py | 70 +++++++++------------------------- 1 file changed, 18 insertions(+), 52 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 63a7dda..7e605ed 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -19,6 +19,7 @@ import sys import threading import traceback +from collections import OrderedDict from functools import wraps from os import curdir from time import sleep, time @@ -168,6 +169,7 @@ class PyTestService: _bdd_tree: Optional[Dict[str, Any]] _bdd_item_by_name: Dict[str, Item] _bdd_scenario_by_item: Dict[Item, Scenario] + _bdd_item_by_scenario: Dict[Scenario, Item] _start_tracker: Set[str] _launch_id: Optional[str] agent_name: str @@ -183,8 +185,9 @@ def __init__(self, agent_config: AgentConfig) -> None: self._issue_types = {} self._tree_path = {} self._bdd_tree = None - self._bdd_item_by_name = {} + self._bdd_item_by_name = OrderedDict() self._bdd_scenario_by_item = {} + self._bdd_item_by_scenario = {} self._start_tracker = set() self._launch_id = None self.agent_name = "pytest-reportportal" @@ -1077,14 +1080,11 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None def _get_python_name(self, name: str) -> str: python_name = f"test_{make_python_name(name)}" - same_scenario_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] - if len(same_scenario_names) < 1: + same_item_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] + if len(same_item_names) < 1: return python_name - elif len(same_scenario_names) == 1: - return same_scenario_names[0] else: - indexes = sorted([int(name.split("_")[-1]) for name in same_scenario_names]) - return f"{python_name}_{indexes[-1]}" + return same_item_names[-1] # Should work fine, since we use OrderedDict def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """Save BDD scenario and Feature to test tree. The scenario will be started later if a step will be reported. 
@@ -1097,6 +1097,7 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: item_name = self._get_python_name(scenario.name) test_item = self._bdd_item_by_name.get(item_name, None) self._bdd_scenario_by_item[test_item] = scenario + self._bdd_item_by_scenario[scenario] = test_item root_leaf = self._bdd_tree if not root_leaf: @@ -1152,55 +1153,20 @@ def finish_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: leaf["exec"] = ExecStatus.FINISHED self._finish_parents(leaf) - def _get_scenario_parameters_from_template( - self, scenario: Scenario, scenario_template: Optional[ScenarioTemplate] - ) -> Optional[Dict[str, str]]: + def _get_scenario_parameters_from_template(self, scenario: Scenario) -> Optional[Dict[str, str]]: """Get scenario parameters from its template by comparing steps. :param scenario: The scenario instance - :param scenario_template: The template scenario instance which holds examples - :return: A dictionary with parameter names and values, or None if no parameters found """ - if not scenario_template: + item = self._bdd_item_by_scenario.get(scenario, None) + if not item: return None - - # Handle both single Examples and list of Examples - examples_list = [] - if isinstance(scenario_template.examples, list): - examples_list.extend(scenario_template.examples) - else: - examples_list.append(scenario_template.examples) - - # Get rendered scenario step names - scenario_steps = [step.name for step in scenario.steps] - - # Try each example row until we find matching parameters - for examples in examples_list: - if not examples or not examples.examples: - continue - - param_names = examples.example_params - - # Check each row of examples - for values in examples.examples: - # Create parameters dictionary for the current row - params = dict(zip(param_names, values)) - - # Compare template steps with scenario steps - template_steps = [] - for template_step in scenario_template.steps: - step_name = template_step.name - # Replace parameters in step name with values - for param_name, param_value in params.items(): - pattern = f"<{param_name}>" - step_name = step_name.replace(pattern, str(param_value)) - template_steps.append(step_name) - - # If all steps match, we found our parameters - if template_steps == scenario_steps: - return params - + item_params = item.callspec.params if hasattr(item, "callspec") else None + if not item_params: + return None + if "_pytest_bdd_example" in item_params: + return OrderedDict(item_params["_pytest_bdd_example"]) return None def _get_scenario_code_ref(self, scenario: Scenario, scenario_template: Optional[ScenarioTemplate]) -> str: @@ -1209,7 +1175,7 @@ def _get_scenario_code_ref(self, scenario: Scenario, scenario_template: Optional if rule: code_ref += f"[RULE:{rule.name}]/" if scenario_template and scenario_template.templated and scenario_template.examples: - parameters = self._get_scenario_parameters_from_template(scenario, scenario_template) + parameters = self._get_scenario_parameters_from_template(scenario) if parameters: parameters_str = ";".join([f"{k}:{v}" for k, v in sorted(parameters.items())]) parameters_str = f"[{parameters_str}]" if parameters_str else "" @@ -1238,7 +1204,7 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: leaf["description"] = description if description else None scenario_template = self._get_scenario_template(scenario) if scenario_template: - parameters = self._get_scenario_parameters_from_template(scenario, scenario_template) + parameters = 
self._get_scenario_parameters_from_template(scenario) leaf["parameters"] = parameters if parameters: parameters_str = f"Parameters:\n\n{markdown_helpers.format_data_table_dict(parameters)}"
From c2e660b0333def4e54f5738609edaff6a8ce6b74 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 17:06:38 +0300 Subject: [PATCH 096/110] Add dynamic name test and fixes --- .../dynamic_scenario_outline_names.feature | 11 +++++ .../step_defs/scenario_outline_name_steps.py | 38 ++++++++++++++++ pytest_reportportal/service.py | 6 +-- tests/integration/test_bdd.py | 43 +++++++++++++++++++ 4 files changed, 95 insertions(+), 3 deletions(-) create mode 100644 examples/bdd/features/dynamic_scenario_outline_names.feature create mode 100644 examples/bdd/step_defs/scenario_outline_name_steps.py
diff --git a/examples/bdd/features/dynamic_scenario_outline_names.feature b/examples/bdd/features/dynamic_scenario_outline_names.feature new file mode 100644 index 0000000..0f3ed07 --- /dev/null +++ b/examples/bdd/features/dynamic_scenario_outline_names.feature @@ -0,0 +1,11 @@ +Feature: Dynamic scenario outline names + + Scenario Outline: Test with the parameter <str> + Given It is test with parameters + When I have parameter <str> + Then I emit number <parameters> on level info + + Examples: + | str | parameters | + | "first" | 123 | + | "second" | 12345 |
diff --git a/examples/bdd/step_defs/scenario_outline_name_steps.py b/examples/bdd/step_defs/scenario_outline_name_steps.py new file mode 100644 index 0000000..75b6d69 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_name_steps.py @@ -0,0 +1,38 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
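The `_get_python_name` change in this patch keys the item lookup on the scenario *template* name because, for dynamically named outlines like the feature file above, pytest-bdd derives the collected test function name from the unrendered template title (placeholders included), not from the per-example rendered name. A rough sketch of that normalization, re-implemented with the same regexes `service.py` defines; treat it as an approximation, since the exact rules live in pytest-bdd's `make_python_name`:

```python
import re

PYTHON_REPLACE_REGEX = re.compile(r"\W")  # mirrors the constant in service.py
ALPHA_REGEX = re.compile(r"^\d+_*")       # strips a leading digit run


def python_test_name(template_name: str) -> str:
    """Approximate the "test_<python name>" convention used for BDD items."""
    name = PYTHON_REPLACE_REGEX.sub("_", template_name)
    name = ALPHA_REGEX.sub("", name)
    return f"test_{name}".lower()


# e.g. "Test with the parameter <str>" -> "test_test_with_the_parameter__str_"
```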
+ +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/dynamic_scenario_outline_names.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 7e605ed..960ae56 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1078,8 +1078,8 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, Any, None LOGGER.exception(e) reporter.finish_nested_step(item_id, timestamp(), "FAILED") - def _get_python_name(self, name: str) -> str: - python_name = f"test_{make_python_name(name)}" + def _get_python_name(self, scenario: Scenario) -> str: + python_name = f"test_{make_python_name(self._get_scenario_template(scenario).name)}" same_item_names = [name for name in self._bdd_item_by_name.keys() if name.startswith(python_name)] if len(same_item_names) < 1: return python_name @@ -1094,7 +1094,7 @@ def start_bdd_scenario(self, feature: Feature, scenario: Scenario) -> None: """ if not PYTEST_BDD: return - item_name = self._get_python_name(scenario.name) + item_name = self._get_python_name(scenario) test_item = self._bdd_item_by_name.get(item_name, None) self._bdd_scenario_by_item[test_item] = scenario self._bdd_item_by_scenario[scenario] = test_item diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index a74a3f1..35ecef7 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -851,3 +851,46 @@ def test_rule_description(mock_client_init): assert rule_call[1]["name"] == "Rule: The first rule" assert rule_call[1]["description"] == "Description for the Rule" assert rule_call[1]["item_type"] == "SUITE" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_dynamic_name(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_name_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == 'Feature: Dynamic scenario outline names - Scenario Outline: Test with the parameter "first"' + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert ( + scenario_call_1[1]["code_ref"] + == 'features/dynamic_scenario_outline_names.feature/[EXAMPLE:Test with the parameter "first"[parameters:123;str:"first"]]' + ) + assert scenario_call_1[1]["parameters"] == {"str": '"first"', "parameters": "123"} + assert scenario_call_1[1]["description"] == ( + "Parameters:\n\n" + "\xa0\xa0\xa0\xa0|\xa0\xa0\xa0str\xa0\xa0\xa0|\xa0parameters\xa0|\n" + "\xa0\xa0\xa0\xa0|---------|------------|\n" + '\xa0\xa0\xa0\xa0|\xa0"first"\xa0|\xa0\xa0\xa0\xa0123\xa0\xa0\xa0\xa0\xa0|' + ) + + scenario_call_2 = mock_client.start_test_item.call_args_list[4] + assert ( + scenario_call_2[1]["name"] + == 'Feature: Dynamic scenario outline names - Scenario Outline: Test with the parameter "second"' + ) + assert 
scenario_call_2[1]["item_type"] == "STEP" + assert ( + scenario_call_2[1]["code_ref"] + == 'features/dynamic_scenario_outline_names.feature/[EXAMPLE:Test with the parameter "second"[parameters:12345;str:"second"]]' + ) + assert scenario_call_2[1]["parameters"] == {"str": '"second"', "parameters": "12345"} + assert scenario_call_2[1]["description"] == ( + "Parameters:\n\n" + "\xa0\xa0\xa0\xa0|\xa0\xa0\xa0str\xa0\xa0\xa0\xa0|\xa0parameters\xa0|\n" + "\xa0\xa0\xa0\xa0|----------|------------|\n" + '\xa0\xa0\xa0\xa0|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|' + ) From 27b04b0dc29e2b35381887ea956b8fad3862b7a5 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 17:30:11 +0300 Subject: [PATCH 097/110] Add another test --- .../features/scenario_outline_fail.feature | 12 +++ .../step_defs/scenario_outline_fail_steps.py | 43 +++++++++ tests/integration/test_bdd.py | 87 ++++++++++++++++++- 3 files changed, 138 insertions(+), 4 deletions(-) create mode 100644 examples/bdd/features/scenario_outline_fail.feature create mode 100644 examples/bdd/step_defs/scenario_outline_fail_steps.py diff --git a/examples/bdd/features/scenario_outline_fail.feature b/examples/bdd/features/scenario_outline_fail.feature new file mode 100644 index 0000000..049a70c --- /dev/null +++ b/examples/bdd/features/scenario_outline_fail.feature @@ -0,0 +1,12 @@ +Feature: Basic test with parameters which fails + + Scenario Outline: Test with different parameters failing + Given It is test with parameters + When I have parameter + Then I emit number on level info + Then I fail + + Examples: + | str | parameters | + | "first" | 123 | + | "second" | 12345 | diff --git a/examples/bdd/step_defs/scenario_outline_fail_steps.py b/examples/bdd/step_defs/scenario_outline_fail_steps.py new file mode 100644 index 0000000..ebca0a3 --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_fail_steps.py @@ -0,0 +1,43 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_fail.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) + + +@then("I fail") +def fail_step(): + raise AssertionError("This step always fails") diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 35ecef7..2379c92 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -866,8 +866,8 @@ def test_scenario_outline_dynamic_name(mock_client_init): ) assert scenario_call_1[1]["item_type"] == "STEP" assert ( - scenario_call_1[1]["code_ref"] - == 'features/dynamic_scenario_outline_names.feature/[EXAMPLE:Test with the parameter "first"[parameters:123;str:"first"]]' + scenario_call_1[1]["code_ref"] == "features/dynamic_scenario_outline_names.feature/" + '[EXAMPLE:Test with the parameter "first"[parameters:123;str:"first"]]' ) assert scenario_call_1[1]["parameters"] == {"str": '"first"', "parameters": "123"} assert scenario_call_1[1]["description"] == ( @@ -884,8 +884,8 @@ def test_scenario_outline_dynamic_name(mock_client_init): ) assert scenario_call_2[1]["item_type"] == "STEP" assert ( - scenario_call_2[1]["code_ref"] - == 'features/dynamic_scenario_outline_names.feature/[EXAMPLE:Test with the parameter "second"[parameters:12345;str:"second"]]' + scenario_call_2[1]["code_ref"] == "features/dynamic_scenario_outline_names.feature/" + '[EXAMPLE:Test with the parameter "second"[parameters:12345;str:"second"]]' ) assert scenario_call_2[1]["parameters"] == {"str": '"second"', "parameters": "12345"} assert scenario_call_2[1]["description"] == ( @@ -894,3 +894,82 @@ def test_scenario_outline_dynamic_name(mock_client_init): "\xa0\xa0\xa0\xa0|----------|------------|\n" '\xa0\xa0\xa0\xa0|\xa0"second"\xa0|\xa0\xa0\xa012345\xa0\xa0\xa0\xa0|' ) + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_fail(mock_client_init): + mock_client = setup_mock(mock_client_init) + setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_fail_steps.py"]) + assert int(result) == 1, "Exit code should be 1 (test error)" + + # Verify first scenario with parameters + scenario_call_1 = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call_1[1]["name"] + == "Feature: Basic test with parameters which fails - Scenario Outline: Test with different parameters failing" + ) + assert scenario_call_1[1]["item_type"] == "STEP" + assert scenario_call_1[1].get("has_stats", True) + assert ( + scenario_call_1[1]["code_ref"] + == "features/scenario_outline_fail.feature/[EXAMPLE:Test with different parameters failing" + '[parameters:123;str:"first"]]' + ) + + # Check failure logging for first scenario + finish_step_call_1 = mock_client.finish_test_item.call_args_list[3] + assert finish_step_call_1[1]["status"] == "FAILED" + assert finish_step_call_1[0][0].startswith("Then I fail") + + finish_scenario_call_1 = mock_client.finish_test_item.call_args_list[4] + assert finish_scenario_call_1[1]["status"] == "FAILED" + 
assert finish_scenario_call_1[1]["item_id"] == scenario_call_1[1]["name"] + "_1" + + log_calls = [ + log_call + for log_call in mock_client.log.call_args_list + if "level" in log_call[1] and log_call[1]["level"] == "ERROR" + ] + assert len(log_calls) >= 4, "Should have at least 4 error log calls" + + error_log = log_calls[0][1] + assert "AssertionError: This step always fails" in error_log["message"] + assert error_log["item_id"].startswith("Then I fail") + + final_error_log = log_calls[1][1] + assert final_error_log["level"] == "ERROR" + assert final_error_log["message"].endswith("AssertionError") + assert final_error_log["item_id"] == scenario_call_1[1]["name"] + "_1" + + # Verify second scenario with parameters + scenario_call_2 = mock_client.start_test_item.call_args_list[5] + assert ( + scenario_call_2[1]["name"] + == "Feature: Basic test with parameters which fails - Scenario Outline: Test with different parameters failing" + ) + assert scenario_call_2[1]["item_type"] == "STEP" + assert scenario_call_2[1].get("has_stats", True) + assert ( + scenario_call_2[1]["code_ref"] + == "features/scenario_outline_fail.feature/[EXAMPLE:Test with different parameters failing" + '[parameters:12345;str:"second"]]' + ) + + # Check failure logging for second scenario + finish_step_call_2 = mock_client.finish_test_item.call_args_list[5 + 3] + assert finish_step_call_2[1]["status"] == "FAILED" + assert finish_step_call_2[0][0].startswith("Then I fail") + + finish_scenario_call_2 = mock_client.finish_test_item.call_args_list[5 + 4] + assert finish_scenario_call_2[1]["status"] == "FAILED" + assert finish_scenario_call_2[1]["item_id"] == scenario_call_2[1]["name"] + "_2" + + error_log = log_calls[2][1] + assert "AssertionError: This step always fails" in error_log["message"] + assert error_log["item_id"].startswith("Then I fail") + + final_error_log = log_calls[3][1] + assert final_error_log["level"] == "ERROR" + assert final_error_log["message"].endswith("AssertionError") + assert final_error_log["item_id"] == scenario_call_2[1]["name"] + "_2"
From 11da029f5900f9357888602129411a10b245bfc9 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 18:05:03 +0300 Subject: [PATCH 098/110] Add more tests --- .../bdd/features/data_table_parameter.feature | 6 ++ .../features/doc_string_parameters.feature | 8 +++ .../step_defs/data_table_parameter_steps.py | 33 ++++++++++ .../step_defs/doc_string_parameters_steps.py | 32 ++++++++++ tests/integration/test_bdd.py | 61 +++++++++++++++++++ 5 files changed, 140 insertions(+) create mode 100644 examples/bdd/features/data_table_parameter.feature create mode 100644 examples/bdd/features/doc_string_parameters.feature create mode 100644 examples/bdd/step_defs/data_table_parameter_steps.py create mode 100644 examples/bdd/step_defs/doc_string_parameters_steps.py
diff --git a/examples/bdd/features/data_table_parameter.feature b/examples/bdd/features/data_table_parameter.feature new file mode 100644 index 0000000..3753571 --- /dev/null +++ b/examples/bdd/features/data_table_parameter.feature @@ -0,0 +1,6 @@ +Feature: A basic test with a Data Table parameter + + Scenario: Test with Data Table + Given a step with a data table: + | key | value | + | myKey | myValue |
diff --git a/examples/bdd/features/doc_string_parameters.feature b/examples/bdd/features/doc_string_parameters.feature new file mode 100644 index 0000000..3216259 --- /dev/null +++ b/examples/bdd/features/doc_string_parameters.feature @@ -0,0 +1,8 @@ +Feature: Basic test with a docstring parameter + 
Scenario: Test with a docstring parameter + Given I have a docstring parameter: + """ + My very long parameter + With some new lines + """ diff --git a/examples/bdd/step_defs/data_table_parameter_steps.py b/examples/bdd/step_defs/data_table_parameter_steps.py new file mode 100644 index 0000000..c115a67 --- /dev/null +++ b/examples/bdd/step_defs/data_table_parameter_steps.py @@ -0,0 +1,33 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Dict + +from pytest_bdd import given, parsers, scenarios + +# Import the scenario from the feature file +scenarios("../features/data_table_parameter.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("a step with a data table:") +def step_with_data_table(datatable: Dict[str, str]) -> None: + """Step that receives a data table and logs its content. + + :param datatable: Data table from the feature file + """ + LOGGER.info("Data table content: %s", datatable) diff --git a/examples/bdd/step_defs/doc_string_parameters_steps.py b/examples/bdd/step_defs/doc_string_parameters_steps.py new file mode 100644 index 0000000..8435808 --- /dev/null +++ b/examples/bdd/step_defs/doc_string_parameters_steps.py @@ -0,0 +1,32 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, scenarios, when + +# Import the scenario from the feature file +scenarios("../features/doc_string_parameters.feature") + + +LOGGER = logging.getLogger(__name__) + + +@given("I have a docstring parameter:") +def step_with_docstring(docstring: str) -> None: + """Step that receives a docstring and logs its content. 
+ + :param docstring: Multi-line docstring from the feature file + """ + LOGGER.info("Docstring content: %s", docstring) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 2379c92..61beed5 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -973,3 +973,64 @@ def test_scenario_outline_fail(mock_client_init): assert final_error_log["level"] == "ERROR" assert final_error_log["message"].endswith("AssertionError") assert final_error_log["item_id"] == scenario_call_1[1]["name"] + "_2" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_doc_string_parameters(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/doc_string_parameters_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] + == "Feature: Basic test with a docstring parameter - Scenario: Test with a docstring parameter" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1].get("has_stats", True) + assert ( + scenario_call[1]["code_ref"] + == "features/doc_string_parameters.feature/[SCENARIO:Test with a docstring parameter]" + ) + + # Verify step + given_step = mock_client.start_test_item.call_args_list[1] + assert given_step[0][0] == "Given I have a docstring parameter:" + assert given_step[0][2] == "step" + assert given_step[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert given_step[1]["has_stats"] is False + + # Verify steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_data_table_parameter_steps(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/data_table_parameter_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify scenario + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] + == "Feature: A basic test with a Data Table parameter - Scenario: Test with Data Table" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1].get("has_stats", True) + assert scenario_call[1]["code_ref"] == "features/data_table_parameter.feature/[SCENARIO:Test with Data Table]" + + # Verify step + given_step = mock_client.start_test_item.call_args_list[1] + assert given_step[0][0] == "Given a step with a data table:" + assert given_step[0][2] == "step" + assert given_step[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert given_step[1]["has_stats"] is False + + # Verify steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" From 4f4bbc2d3f5d9d92c25a6c15f8a9c39229aac34a Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 18:16:19 +0300 Subject: [PATCH 099/110] Fix fixture reporting --- pytest_reportportal/service.py | 2 +- tests/integration/test_fixtures.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 960ae56..a6b4f4a 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1060,7 +1060,7 @@ def report_fixture(self, name: str, error_msg: str) -> Generator[None, 
Any, None try: outcome = yield exc_info = outcome.excinfo - exception = exc_info[1] + exception = exc_info[1] if exc_info else None status = "PASSED" if exception: if type(exception).__name__ != "Skipped":
diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index 134cd26..ce6a704 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -90,10 +90,13 @@ def test_fixture_on_off(mock_client_init, switch): assert int(result) == 0, "Exit code should be 0 (no errors)" start_count = mock_client.start_test_item.call_count + finish_calls = mock_client.finish_test_item.call_args_list finish_count = mock_client.finish_test_item.call_count expected_count = 3 if switch else 1 assert start_count == expected_count, 'Incorrect number of "start_test_item" calls' assert finish_count == expected_count, 'Incorrect number of "finish_test_item" calls' + for call in finish_calls: + assert call[1]["status"] == "PASSED" def run_tests(test_path, should_fail=False):
From 973ab309b0361d6c02b0e2160693c93db875c849 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 18:20:43 +0300 Subject: [PATCH 100/110] Disable some tests for old versions of pytest_bdd --- tests/integration/test_bdd.py | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 61beed5..908ac88 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -853,6 +853,7 @@ def test_rule_description(mock_client_init): assert rule_call[1]["item_type"] == "SUITE" +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_scenario_outline_dynamic_name(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) @@ -975,6 +976,7 @@ def test_scenario_outline_fail(mock_client_init): assert final_error_log["item_id"] == scenario_call_1[1]["name"] + "_2" +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_doc_string_parameters(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init) @@ -1007,6 +1009,7 @@ def test_doc_string_parameters(mock_client_init): assert call[1]["status"] == "PASSED" +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_data_table_parameter_steps(mock_client_init): mock_client = setup_mock_for_logging(mock_client_init)
From 928617e52301e08a6f882b9ae80d2fe6ead884fe Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 18:23:41 +0300 Subject: [PATCH 101/110] Fix imports --- examples/bdd/step_defs/data_table_parameter_steps.py | 2 +- examples/bdd/step_defs/doc_string_parameters_steps.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/bdd/step_defs/data_table_parameter_steps.py b/examples/bdd/step_defs/data_table_parameter_steps.py index c115a67..be4aceb 100644 --- a/examples/bdd/step_defs/data_table_parameter_steps.py +++ b/examples/bdd/step_defs/data_table_parameter_steps.py @@ -15,7 +15,7 @@ import logging from typing import Dict -from pytest_bdd import given, parsers, scenarios +from pytest_bdd import given, scenarios # Import the scenario from the feature file scenarios("../features/data_table_parameter.feature")
diff --git a/examples/bdd/step_defs/doc_string_parameters_steps.py b/examples/bdd/step_defs/doc_string_parameters_steps.py index 8435808..a319ab9 100644 ---
a/examples/bdd/step_defs/doc_string_parameters_steps.py +++ b/examples/bdd/step_defs/doc_string_parameters_steps.py @@ -14,7 +14,7 @@ import logging -from pytest_bdd import given, scenarios, when +from pytest_bdd import given, scenarios # Import the scenario from the feature file scenarios("../features/doc_string_parameters.feature") From 23d21a905d3d57a222e5a808d0cb46c5cdafc59d Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Thu, 27 Feb 2025 18:31:03 +0300 Subject: [PATCH 102/110] Small refactoring --- pytest_reportportal/service.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index a6b4f4a..6680ce5 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1225,6 +1225,22 @@ def _finish_bdd_step(self, leaf: Dict[str, Any], status: str) -> None: reporter.finish_nested_step(item_id, timestamp(), status) leaf["exec"] = ExecStatus.FINISHED + def _is_background_step(self, step: Step, feature: Feature) -> bool: + """Check if step belongs to feature background. + + :param step: Current step + :param feature: Current feature + :return: True if step is from background, False otherwise + """ + if not feature.background: + return False + + background_steps = feature.background.steps + return any( + s.name == step.name and s.keyword == step.keyword and s.line_number == step.line_number + for s in background_steps + ) + @check_rp_enabled def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> None: """Start BDD step. @@ -1244,16 +1260,7 @@ def start_bdd_step(self, feature: Feature, scenario: Scenario, step: Step) -> No scenario_leaf["exec"] = ExecStatus.IN_PROGRESS reporter = self.rp.step_reporter step_leaf = self._create_leaf(LeafType.NESTED, scenario_leaf, step) - background_steps = [] - if feature.background: - background_steps = feature.background.steps - if next( - filter( - lambda s: s.name == step.name and s.keyword == step.keyword and s.line_number == step.line_number, - background_steps, - ), - None, - ): + if self._is_background_step(step, feature): background_leaf = scenario_leaf["children"][feature.background] background_leaf["children"][step] = step_leaf if background_leaf["exec"] != ExecStatus.IN_PROGRESS: From 67b8e57e4e9fa2a441b7a4ce8d1cea9cbfd9b2d9 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 14:39:17 +0300 Subject: [PATCH 103/110] Add custom Test Case ID --- .../bdd/features/custom_test_case_id.feature | 5 ++ .../scenario_outline_test_case_id.feature | 11 ++++ .../step_defs/custom_test_case_id_steps.py | 27 +++++++++ .../scenario_outline_test_case_id_steps.py | 37 ++++++++++++ pytest_reportportal/service.py | 14 ++++- tests/integration/test_bdd.py | 57 +++++++++++++++++++ 6 files changed, 149 insertions(+), 2 deletions(-) create mode 100644 examples/bdd/features/custom_test_case_id.feature create mode 100644 examples/bdd/features/scenario_outline_test_case_id.feature create mode 100644 examples/bdd/step_defs/custom_test_case_id_steps.py create mode 100644 examples/bdd/step_defs/scenario_outline_test_case_id_steps.py diff --git a/examples/bdd/features/custom_test_case_id.feature b/examples/bdd/features/custom_test_case_id.feature new file mode 100644 index 0000000..9d6618d --- /dev/null +++ b/examples/bdd/features/custom_test_case_id.feature @@ -0,0 +1,5 @@ +Feature: Test dummy scenario + + @tc_id:my_tc_id + Scenario: The scenario + Given I have empty step diff --git 
a/examples/bdd/features/scenario_outline_test_case_id.feature b/examples/bdd/features/scenario_outline_test_case_id.feature new file mode 100644 index 0000000..b14a57e --- /dev/null +++ b/examples/bdd/features/scenario_outline_test_case_id.feature @@ -0,0 +1,11 @@ +Feature: Basic test with parameters + + @tc_id:outline_tc_id + Scenario Outline: Test with different parameters + Given It is test with parameters + When I have parameter <str> + Then I emit number <parameters> on level info + + Examples: + | str | parameters | + | "first" | 123 |
diff --git a/examples/bdd/step_defs/custom_test_case_id_steps.py b/examples/bdd/step_defs/custom_test_case_id_steps.py new file mode 100644 index 0000000..6528831 --- /dev/null +++ b/examples/bdd/step_defs/custom_test_case_id_steps.py @@ -0,0 +1,27 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from pytest_bdd import given, scenarios + +# Import the scenario from the feature file +scenarios("../features/custom_test_case_id.feature") + +LOGGER = logging.getLogger(__name__) + + +@given("I have empty step") +def step_with_custom_test_case_id(): + LOGGER.info("I have empty step")
diff --git a/examples/bdd/step_defs/scenario_outline_test_case_id_steps.py b/examples/bdd/step_defs/scenario_outline_test_case_id_steps.py new file mode 100644 index 0000000..d5174aa --- /dev/null +++ b/examples/bdd/step_defs/scenario_outline_test_case_id_steps.py @@ -0,0 +1,37 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
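For reference, the convention these feature files exercise (implemented in `_get_scenario_test_case_id` further down): a `tc_id:<value>` tag overrides the code-reference-based Test Case ID, and example-row parameters are appended as a sorted `[key:value;key:value]` suffix. A condensed sketch of that logic, operating on raw tag strings rather than the parsed attribute dicts the service actually walks:

```python
from typing import Dict, List, Optional


def test_case_id(tags: List[str], params: Optional[Dict[str, str]], code_ref: str) -> str:
    """Compute a Test Case ID from BDD tags, falling back to the code reference."""
    for tag in tags:
        if tag.startswith("tc_id:"):
            tc_id = tag.split(":", 1)[1]
            if params:
                joined = ";".join(f"{k}:{v}" for k, v in sorted(params.items()))
                return f"{tc_id}[{joined}]"
            return tc_id
    return code_ref


# Matches the expectation asserted in the tests below:
# test_case_id(["tc_id:outline_tc_id"], {"str": '"first"', "parameters": "123"}, "...")
# returns 'outline_tc_id[parameters:123;str:"first"]'
```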
+ +import logging + +from pytest_bdd import given, parsers, scenarios, then, when + +# Import the scenario from the feature file +scenarios("../features/scenario_outline_test_case_id.feature") + +LOGGER = logging.getLogger(__name__) + + +@given("It is test with parameters") +def step_with_parameters(): + LOGGER.info("It is test with parameters") + + +@when(parsers.parse('I have parameter "{parameter}"')) +def have_parameter_str(parameter: str): + LOGGER.info("String parameter %s", parameter) + + +@then(parsers.parse("I emit number {parameters:d} on level info")) +def emit_number_info(parameters): + LOGGER.info("Test with parameters: %d", parameters) diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 6680ce5..10eb8f9 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -1189,6 +1189,16 @@ def _get_scenario_code_ref(self, scenario: Scenario, scenario_template: Optional return code_ref def _get_scenario_test_case_id(self, leaf: Dict[str, Any]) -> str: + attributes = leaf.get("attributes", []) + params: Optional[Dict[str, str]] = leaf.get("parameters", None) + for attribute in attributes: + if attribute.get("key", None) == "tc_id": + tc_id = attribute["value"] + params_str = "" + if params: + params_str = ";".join([f"{k}:{v}" for k, v in sorted(params.items())]) + params_str = f"[{params_str}]" + return f"{tc_id}{params_str}" return leaf["code_ref"] def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: @@ -1203,7 +1213,7 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: ).rstrip("\n") leaf["description"] = description if description else None scenario_template = self._get_scenario_template(scenario) - if scenario_template: + if scenario_template and scenario_template.templated: parameters = self._get_scenario_parameters_from_template(scenario) leaf["parameters"] = parameters if parameters: @@ -1213,8 +1223,8 @@ def _process_scenario_metadata(self, leaf: Dict[str, Any]) -> None: else: leaf["description"] = parameters_str leaf["code_ref"] = self._get_scenario_code_ref(scenario, scenario_template) - leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) leaf["attributes"] = self._process_bdd_attributes(scenario) + leaf["test_case_id"] = self._get_scenario_test_case_id(leaf) def _finish_bdd_step(self, leaf: Dict[str, Any], status: str) -> None: if leaf["exec"] != ExecStatus.IN_PROGRESS: diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 908ac88..ca894f9 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -1037,3 +1037,60 @@ def test_data_table_parameter_steps(mock_client_init): finish_calls = mock_client.finish_test_item.call_args_list for call in finish_calls: assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_scenario_outline_test_case_id(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_test_case_id_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify first scenario with parameters includes the test case ID + scenario_call = mock_client.start_test_item.call_args_list[0] + assert ( + scenario_call[1]["name"] + == "Feature: Basic test with parameters - Scenario Outline: Test with different parameters" + ) + assert scenario_call[1]["item_type"] == "STEP" + assert ( + scenario_call[1]["code_ref"] + == "features/scenario_outline_test_case_id.feature/[EXAMPLE:Test 
with different parameters" + '[parameters:123;str:"first"]]' + ) + assert scenario_call[1]["parameters"] == {"str": '"first"', "parameters": "123"} + + # Verify the test case ID is correctly reported using the tag instead of code_ref + assert scenario_call[1]["test_case_id"] == 'outline_tc_id[parameters:123;str:"first"]' + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_custom_test_case_id(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/custom_test_case_id_steps.py"]) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + # Verify scenario includes the test case ID + scenario_call = mock_client.start_test_item.call_args_list[0] + assert scenario_call[1]["name"] == "Feature: Test dummy scenario - Scenario: The scenario" + assert scenario_call[1]["item_type"] == "STEP" + assert scenario_call[1]["code_ref"] == "features/custom_test_case_id.feature/[SCENARIO:The scenario]" + + # Verify the test case ID is correctly reported using the tag instead of code_ref + assert scenario_call[1]["test_case_id"] == "my_tc_id" + + # Verify step info + step_call = mock_client.start_test_item.call_args_list[1] + assert step_call[0][0] == "Given I have empty step" + assert step_call[0][2] == "step" + assert step_call[1]["parent_item_id"] == scenario_call[1]["name"] + "_1" + assert step_call[1]["has_stats"] is False + + # Verify all steps pass + finish_calls = mock_client.finish_test_item.call_args_list + for call in finish_calls: + assert call[1]["status"] == "PASSED" From 35170f3f9cbd6d0e4d61b7f86e964091678bd3c7 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 14:48:03 +0300 Subject: [PATCH 104/110] Add run with no pytest-bdd --- requirements-dev-bdd.txt | 1 + requirements-dev.txt | 1 - tox.ini | 14 +++++++++++++- 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 requirements-dev-bdd.txt diff --git a/requirements-dev-bdd.txt b/requirements-dev-bdd.txt new file mode 100644 index 0000000..472e2bd --- /dev/null +++ b/requirements-dev-bdd.txt @@ -0,0 +1 @@ +pytest-bdd>=7.2.0 diff --git a/requirements-dev.txt b/requirements-dev.txt index f4b63b7..8cddeb9 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,4 +1,3 @@ delayed-assert pytest-cov pytest-parallel -pytest-bdd>=7.2.0 diff --git a/tox.ini b/tox.ini index 666426f..12244b7 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,7 @@ isolated_build = True envlist = pep + nobdd py38 py39 py310 @@ -13,12 +14,23 @@ envlist = deps = -rrequirements.txt -rrequirements-dev.txt + -rrequirements-dev-bdd.txt setenv = AGENT_NO_ANALYTICS = 1 commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xml tests/ -s -vv +[testenv:nobdd] +deps = + -rrequirements.txt + -rrequirements-dev.txt + +setenv = + AGENT_NO_ANALYTICS = 1 + +commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xml tests/ -s -vv --ignore tests/integration/test_bdd.py + [testenv:pep] skip_install = True deps = pre-commit>=1.19.0 @@ -28,7 +40,7 @@ commands = pre-commit run --all-files --show-diff-on-failure python = 3.8: py38 3.9: py39 - 3.10: pep, py310 + 3.10: pep, nobdd, py310 3.11: py311 3.12: py312 3.13: py313 From 3d6c9d04cde2f54daa2c38fc32638281375c3aeb Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 14:50:07 +0300 Subject: 
[PATCH 105/110] Remove cov from nobdd --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 12244b7..3d7438b 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ deps = setenv = AGENT_NO_ANALYTICS = 1 -commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xml tests/ -s -vv +commands = pytest --cov={envsitepackagesdir}/pytest_reportportal tests/ -s -vv [testenv:nobdd] deps = From 0fc4d77a5039935ebb40d438f381102e6b28e8cd Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 14:50:25 +0300 Subject: [PATCH 106/110] Remove cov from nobdd --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 3d7438b..101d792 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ deps = setenv = AGENT_NO_ANALYTICS = 1 -commands = pytest --cov={envsitepackagesdir}/pytest_reportportal tests/ -s -vv +commands = pytest tests/ -s -vv [testenv:nobdd] deps = From 2daf3aa0debec8634878895b40a44087c93665a7 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 14:50:48 +0300 Subject: [PATCH 107/110] Remove cov from nobdd --- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 101d792..43fd5f3 100644 --- a/tox.ini +++ b/tox.ini @@ -19,7 +19,7 @@ deps = setenv = AGENT_NO_ANALYTICS = 1 -commands = pytest tests/ -s -vv +commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xml tests/ -s -vv [testenv:nobdd] deps = @@ -29,7 +29,7 @@ deps = setenv = AGENT_NO_ANALYTICS = 1 -commands = pytest --cov={envsitepackagesdir}/pytest_reportportal --cov-report=xml tests/ -s -vv --ignore tests/integration/test_bdd.py +commands = pytest tests/ -s -vv --ignore tests/integration/test_bdd.py [testenv:pep] skip_install = True From 986030a57e934d8577c4fc05b87cb4ad325e47d5 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 17:07:01 +0300 Subject: [PATCH 108/110] Fixes #389 --- pytest_reportportal/config.py | 6 ++- pytest_reportportal/service.py | 31 ++++++------ requirements.txt | 2 +- tests/integration/test_attributes.py | 44 ++++++++++++++++ tests/integration/test_bdd.py | 75 ++++++++++++++++++++-------- 5 files changed, 118 insertions(+), 40 deletions(-) diff --git a/pytest_reportportal/config.py b/pytest_reportportal/config.py index 26d6b75..092fe87 100644 --- a/pytest_reportportal/config.py +++ b/pytest_reportportal/config.py @@ -15,7 +15,7 @@ import warnings from os import getenv -from typing import Any, Optional, Tuple, Union +from typing import Any, List, Optional, Tuple, Union from _pytest.config import Config from reportportal_client import ClientType, OutputType @@ -49,7 +49,8 @@ class AgentConfig: rp_bts_url: str rp_launch: str rp_launch_id: Optional[str] - rp_launch_attributes: Optional[list] + rp_launch_attributes: Optional[List[str]] + rp_tests_attributes: Optional[List[str]] rp_launch_description: str rp_log_batch_size: int rp_log_batch_payload_size: int @@ -96,6 +97,7 @@ def __init__(self, pytest_config: Config) -> None: self.rp_launch = self.find_option(pytest_config, "rp_launch") self.rp_launch_id = self.find_option(pytest_config, "rp_launch_id") self.rp_launch_attributes = self.find_option(pytest_config, "rp_launch_attributes") + self.rp_tests_attributes = self.find_option(pytest_config, "rp_tests_attributes") self.rp_launch_description = self.find_option(pytest_config, "rp_launch_description") self.rp_log_batch_size = int(self.find_option(pytest_config, "rp_log_batch_size")) 
batch_payload_size = self.find_option(pytest_config, "rp_log_batch_payload_size") diff --git a/pytest_reportportal/service.py b/pytest_reportportal/service.py index 10eb8f9..43be6ad 100644 --- a/pytest_reportportal/service.py +++ b/pytest_reportportal/service.py @@ -90,7 +90,6 @@ ISSUE_DESCRIPTION_ID_TEMPLATE: str = " {issue_id}" PYTHON_REPLACE_REGEX = re.compile(r"\W") ALPHA_REGEX = re.compile(r"^\d+_*") -ATTRIBUTE_DELIMITER = ":" BACKGROUND_STEP_NAME = "Background" @@ -505,11 +504,13 @@ def _lock(self, leaf: Dict[str, Any], func: Callable[[Dict[str, Any]], Any]) -> return func(leaf) return func(leaf) - def _process_bdd_attributes(self, scenario: Union[Feature, Scenario, Rule]) -> List[Dict[str, str]]: + def _process_bdd_attributes(self, item: Union[Feature, Scenario, Rule]) -> List[Dict[str, str]]: tags = [] - tags.extend(scenario.tags) - if isinstance(scenario, Scenario): - template = self._get_scenario_template(scenario) + tags.extend(item.tags) + if isinstance(item, Scenario): + test_attributes = self._config.rp_tests_attributes + tags.extend(test_attributes if test_attributes else []) + template = self._get_scenario_template(item) if template and template.templated: examples = [] if isinstance(template.examples, list): @@ -518,17 +519,7 @@ def _process_bdd_attributes(self, scenario: Union[Feature, Scenario, Rule]) -> L examples.append(template.examples) for example in examples: tags.extend(getattr(example, "tags", [])) - attributes = [] - for tag in tags: - key = None - value = tag - if ATTRIBUTE_DELIMITER in tag: - key, value = tag.split(ATTRIBUTE_DELIMITER, 1) - attribute = {"value": value} - if key: - attribute["key"] = key - attributes.append(attribute) - return attributes + return gen_attributes(tags) def _get_suite_code_ref(self, leaf: Dict[str, Any]) -> str: item = leaf["item"] @@ -768,7 +759,13 @@ def _process_attributes(self, item: Item) -> List[Dict[str, Any]]: :param item: Pytest.Item :return: a set of attributes """ - attributes = set() + test_attributes = self._config.rp_tests_attributes + if test_attributes: + attributes = { + (attr.get("key", None), attr["value"]) for attr in gen_attributes(self._config.rp_tests_attributes) + } + else: + attributes = set() for marker in item.iter_markers(): if marker.name == "issue": if self._config.rp_issue_id_marks: diff --git a/requirements.txt b/requirements.txt index e53c020..ea8ffec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ dill>=0.3.6 pytest>=4.6.10 -reportportal-client~=5.6.1 +reportportal-client~=5.6.2 aenum>=3.1.0 diff --git a/tests/integration/test_attributes.py b/tests/integration/test_attributes.py index d8fb81a..a09e58c 100644 --- a/tests/integration/test_attributes.py +++ b/tests/integration/test_attributes.py @@ -15,6 +15,8 @@ from unittest import mock +import pytest + from tests import REPORT_PORTAL_SERVICE from tests.helpers import utils @@ -124,3 +126,45 @@ def test_custom_runtime_attribute_report(mock_client_init): attribute_tuple_list = [(kv.get("key"), kv["value"]) for kv in actual_attributes] assert set(attribute_tuple_list) == {("scope", "smoke"), (None, "runtime")} + + +@pytest.mark.parametrize("rp_hierarchy_code", [True, False]) +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes(mock_client_init, rp_hierarchy_code): + """Verify configuration attributes are reported. 
+ + :param mock_client_init: Pytest fixture + """ + variables = {"rp_tests_attributes": "test_key:test_value", "rp_hierarchy_code": rp_hierarchy_code} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/test_simple.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + mock_client = mock_client_init.return_value + assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' + + call_args = mock_client.start_test_item.call_args_list + step_call_args = call_args[-1][1] + assert step_call_args["attributes"] == [{"key": "test_key", "value": "test_value"}] + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_add(mock_client_init): + """Verify configuration attributes are reported along with custom attribute. + + :param mock_client_init: Pytest fixture + """ + variables = {"markers": "scope: to which test scope a test relates", "rp_tests_attributes": "test_key:test_value"} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/attributes/test_one_attribute.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + mock_client = mock_client_init.return_value + assert mock_client.start_test_item.call_count > 0, '"start_test_item" called incorrect number of times' + + call_args = mock_client.start_test_item.call_args_list + step_call_args = call_args[-1][1] + attributes = step_call_args["attributes"] + assert len(attributes) == 2 + assert {"key": "scope", "value": "smoke"} in attributes + assert {"key": "test_key", "value": "test_value"} in attributes diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index ca894f9..00f51d2 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -82,8 +82,7 @@ def setup_mock_for_logging(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_basic(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" @@ -124,8 +123,7 @@ def test_basic(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_basic_with_feature_suite(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_arguments.py"], variables=variables) @@ -189,8 +187,7 @@ def test_feature_descriptions(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_failed_feature(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_failed_step.py"]) assert int(result) == 1, "Exit code should be 1 (test error)" @@ -224,8 +221,7 @@ def test_failed_feature(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_scenario_attributes(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) test_file = 
"examples/bdd/step_defs/test_belly.py" result = utils.run_pytest_tests(tests=[test_file]) @@ -241,8 +237,7 @@ def test_scenario_attributes(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_feature_attributes(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -268,8 +263,7 @@ def test_feature_attributes(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_background_step(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) test_file = "examples/bdd/step_defs/test_background.py" result = utils.run_pytest_tests(tests=[test_file]) @@ -332,8 +326,7 @@ def test_background_step(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_background_two_steps(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) test_file = "examples/bdd/step_defs/test_background_two_steps.py" result = utils.run_pytest_tests(tests=[test_file]) @@ -379,8 +372,7 @@ def test_background_two_steps(mock_client_init): @pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_rule(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_rule_steps.py"]) assert int(result) == 0, "Exit code should be 0 (no errors)" @@ -474,8 +466,7 @@ def test_rule(mock_client_init): @pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @mock.patch(REPORT_PORTAL_SERVICE) def test_rule_hierarchy(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) variables = {"rp_hierarchy_code": True} variables.update(utils.DEFAULT_VARIABLES.items()) @@ -899,8 +890,7 @@ def test_scenario_outline_dynamic_name(mock_client_init): @mock.patch(REPORT_PORTAL_SERVICE) def test_scenario_outline_fail(mock_client_init): - mock_client = setup_mock(mock_client_init) - setup_mock_for_logging(mock_client_init) + mock_client = setup_mock_for_logging(mock_client_init) result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/scenario_outline_fail_steps.py"]) assert int(result) == 1, "Exit code should be 1 (test error)" @@ -1094,3 +1084,48 @@ def test_custom_test_case_id(mock_client_init): finish_calls = mock_client.finish_test_item.call_args_list for call in finish_calls: assert call[1]["status"] == "PASSED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_rule(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {"rp_tests_attributes": "test_key:test_value"} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_rule_description_steps.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[0] + assert scenario_call[1]["attributes"] == [{"key": "test_key", "value": "test_value"}] + + +@pytest.mark.parametrize("rp_hierarchy_code, 
scenario_idx", [(True, 2), (False, 0)]) +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_rule_hierarchy(mock_client_init, rp_hierarchy_code, scenario_idx): + mock_client = setup_mock(mock_client_init) + variables = {"rp_tests_attributes": "test_key:test_value", "rp_hierarchy_code": rp_hierarchy_code} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests( + tests=["examples/bdd/step_defs/test_rule_description_steps.py"], variables=variables + ) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[scenario_idx] + assert scenario_call[1]["attributes"] == [{"key": "test_key", "value": "test_value"}] + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_rp_tests_attributes_bdd_tags(mock_client_init): + mock_client = setup_mock(mock_client_init) + variables = {"rp_tests_attributes": "test_key:test_value"} + variables.update(utils.DEFAULT_VARIABLES.items()) + result = utils.run_pytest_tests(tests=["examples/bdd/step_defs/test_belly.py"], variables=variables) + assert int(result) == 0, "Exit code should be 0 (no errors)" + + scenario_call = mock_client.start_test_item.call_args_list[0] + attributes = scenario_call[1]["attributes"] + assert len(attributes) == 3 + assert {"key": "test_key", "value": "test_value"} in attributes + assert {"value": "ok"} in attributes + assert {"key": "key", "value": "value"} in attributes From 269f21beacf60facd170888f5872b921192b9c90 Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 17:36:31 +0300 Subject: [PATCH 109/110] Fixes #390 --- .../test_fixture_exit/test_fixture_exit.py | 24 +++++++++++++++++++ tests/integration/test_fixtures.py | 17 +++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 examples/fixtures/test_fixture_exit/test_fixture_exit.py diff --git a/examples/fixtures/test_fixture_exit/test_fixture_exit.py b/examples/fixtures/test_fixture_exit/test_fixture_exit.py new file mode 100644 index 0000000..a7a0208 --- /dev/null +++ b/examples/fixtures/test_fixture_exit/test_fixture_exit.py @@ -0,0 +1,24 @@ +# Copyright 2025 EPAM Systems +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + + +@pytest.fixture +def fixture_demo(): + pytest.exit("Some Message") + + +def test_exit(fixture_demo): + assert True diff --git a/tests/integration/test_fixtures.py b/tests/integration/test_fixtures.py index ce6a704..f685f0f 100644 --- a/tests/integration/test_fixtures.py +++ b/tests/integration/test_fixtures.py @@ -587,3 +587,20 @@ def test_fixture_setup_skip(mock_client_init): finish_call_kwargs = call_args[-1][1] assert finish_call_kwargs["status"] == "SKIPPED" + + +@mock.patch(REPORT_PORTAL_SERVICE) +def test_fixture_exit(mock_client_init): + mock_client = setup_mock_for_logging(mock_client_init) + + test_path = "examples/fixtures/test_fixture_exit/test_fixture_exit.py" + variables = dict(utils.DEFAULT_VARIABLES) + variables["rp_report_fixtures"] = True + result = utils.run_pytest_tests(tests=[test_path], variables=variables) + assert int(result) == 2, "Exit code should be 2 (unexpected exit)" + + call_args = mock_client.start_test_item.call_args_list + assert len(call_args) == 2, 'Incorrect number of "start_test_item" calls' + + call_args = mock_client.finish_test_item.call_args_list + assert len(call_args) == 2, 'Incorrect number of "finish_test_item" calls' From db108122e5ea2a0cc7cb1570b2520742826b735a Mon Sep 17 00:00:00 2001 From: Vadzim Hushchanskou Date: Fri, 28 Feb 2025 17:44:22 +0300 Subject: [PATCH 110/110] Fixes for Python 3.8 --- tests/integration/test_bdd.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_bdd.py b/tests/integration/test_bdd.py index 00f51d2..beb9584 100644 --- a/tests/integration/test_bdd.py +++ b/tests/integration/test_bdd.py @@ -1100,6 +1100,7 @@ def test_rp_tests_attributes_rule(mock_client_init): assert scenario_call[1]["attributes"] == [{"key": "test_key", "value": "test_value"}] +@pytest.mark.skipif(pytest_bdd_version[0] < 8, reason="Only for pytest-bdd 8+") @pytest.mark.parametrize("rp_hierarchy_code, scenario_idx", [(True, 2), (False, 0)]) @mock.patch(REPORT_PORTAL_SERVICE) def test_rp_tests_attributes_rule_hierarchy(mock_client_init, rp_hierarchy_code, scenario_idx):
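Taken together with patch 108, the new `rp_tests_attributes` option lets a suite stamp every reported test item — plain pytest tests and BDD scenarios alike — with fixed attributes that are merged with per-test markers and feature tags. Assuming it accepts the same quoted, optionally `key:value`-prefixed list syntax as `rp_launch_attributes` (the integration tests above set it as the single string "test_key:test_value"), a configuration sketch with illustrative values:

```ini
[pytest]
rp_tests_attributes = 'component:agent' 'regression'
```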